diff --git a/Cargo.lock b/Cargo.lock
index 47f30c51..754654d4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3082,6 +3082,7 @@ dependencies = [
  "async-trait",
  "bincode",
  "committable",
+ "derive_more 1.0.0",
  "futures",
  "hotshot",
  "hotshot-builder-api",
@@ -4739,6 +4740,7 @@ dependencies = [
  "sha2 0.10.8",
  "tagged-base64",
  "tide-disco",
+ "tokio",
  "tracing",
  "url",
  "vbs",
diff --git a/crates/legacy/Cargo.toml b/crates/legacy/Cargo.toml
index a8f5d048..c342bf35 100644
--- a/crates/legacy/Cargo.toml
+++ b/crates/legacy/Cargo.toml
@@ -14,6 +14,7 @@ async-std = { workspace = true, features = ["unstable", "attributes"] }
 async-trait = { workspace = true }
 bincode = { workspace = true }
 committable = { workspace = true }
+derive_more = { workspace = true, features = ["deref", "deref_mut", "debug"] }
 futures = { workspace = true }
 hotshot = { workspace = true }
 hotshot-builder-api = { workspace = true }
diff --git a/crates/legacy/src/block_size_limits.rs b/crates/legacy/src/block_size_limits.rs
new file mode 100644
index 00000000..b1237bcc
--- /dev/null
+++ b/crates/legacy/src/block_size_limits.rs
@@ -0,0 +1,113 @@
+use std::time::{Duration, Instant};
+
+/// Adjustable limits for block size, capped at the
+/// maximum block size allowed by the protocol
+#[derive(Debug, Clone)]
+pub struct BlockSizeLimits {
+    // maximum block size allowed by the protocol
+    pub protocol_max_block_size: u64,
+    // estimated maximum block size we can build in time
+    pub max_block_size: u64,
+    pub increment_period: Duration,
+    pub last_block_size_increment: Instant,
+}
+
+impl BlockSizeLimits {
+    /// Never go lower than 10 kilobytes
+    pub const MAX_BLOCK_SIZE_FLOOR: u64 = 10_000;
+    /// When adjusting max block size, it will be decremented or incremented
+    /// by current value / `MAX_BLOCK_SIZE_CHANGE_DIVISOR`
+    pub const MAX_BLOCK_SIZE_CHANGE_DIVISOR: u64 = 10;
+
+    pub fn new(protocol_max_block_size: u64, increment_period: Duration) -> Self {
+        Self {
+            protocol_max_block_size,
+            max_block_size: protocol_max_block_size,
+            increment_period,
+            last_block_size_increment: Instant::now(),
+        }
+    }
+
+    /// If the increment period has elapsed or the `force` flag is set,
+    /// increment [`Self::max_block_size`] by current value / [`Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR`]
+    /// with [`Self::protocol_max_block_size`] as a ceiling
+    pub fn try_increment_block_size(&mut self, force: bool) {
+        if force || self.last_block_size_increment.elapsed() >= self.increment_period {
+            self.max_block_size = std::cmp::min(
+                self.max_block_size
+                    + self
+                        .max_block_size
+                        .div_ceil(Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR),
+                self.protocol_max_block_size,
+            );
+            self.last_block_size_increment = Instant::now();
+        }
+    }
+
+    /// Decrement [`Self::max_block_size`] by current value / [`Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR`]
+    /// with [`Self::MAX_BLOCK_SIZE_FLOOR`] as a floor
+    pub fn decrement_block_size(&mut self) {
+        self.max_block_size = std::cmp::max(
+            self.max_block_size
+                - self
+                    .max_block_size
+                    .div_ceil(Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR),
+            Self::MAX_BLOCK_SIZE_FLOOR,
+        );
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use marketplace_builder_shared::testing::constants::{
+        TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_PROTOCOL_MAX_BLOCK_SIZE,
+    };
+
+    use super::*;
+
+    #[test]
+    fn test_increment_block_size() {
+        let mut block_size_limits =
+            BlockSizeLimits::new(TEST_PROTOCOL_MAX_BLOCK_SIZE, Duration::from_millis(25));
+        // Simulate decreased limits
+        block_size_limits.max_block_size = TEST_PROTOCOL_MAX_BLOCK_SIZE / 2;
+
+        // Shouldn't increment: increment period hasn't passed yet
+        block_size_limits.try_increment_block_size(false);
+        assert!(block_size_limits.max_block_size == TEST_PROTOCOL_MAX_BLOCK_SIZE / 2);
+
+        // Should increment: increment period hasn't passed yet, but force flag is set
+        block_size_limits.try_increment_block_size(true);
+        assert!(block_size_limits.max_block_size > TEST_PROTOCOL_MAX_BLOCK_SIZE / 2);
+        let new_size = block_size_limits.max_block_size;
+
+        std::thread::sleep(Duration::from_millis(30));
+
+        // Should increment: increment period has passed
+        block_size_limits.try_increment_block_size(false);
+        assert!(block_size_limits.max_block_size > new_size);
+    }
+
+    #[test]
+    fn test_decrement_block_size() {
+        let mut block_size_limits = BlockSizeLimits::new(
+            TEST_PROTOCOL_MAX_BLOCK_SIZE,
+            TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+        );
+        block_size_limits.decrement_block_size();
+        assert!(block_size_limits.max_block_size < TEST_PROTOCOL_MAX_BLOCK_SIZE);
+    }
+
+    #[test]
+    fn test_max_block_size_floor() {
+        let mut block_size_limits = BlockSizeLimits::new(
+            BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR + 1,
+            TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+        );
+        block_size_limits.decrement_block_size();
+        assert_eq!(
+            block_size_limits.max_block_size,
+            BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR
+        );
+    }
+}
diff --git a/crates/legacy/src/builder_state.rs b/crates/legacy/src/builder_state.rs
index bc4d79eb..dec8e90a 100644
--- a/crates/legacy/src/builder_state.rs
+++ b/crates/legacy/src/builder_state.rs
@@ -10,14 +10,12 @@ use hotshot_types::{
     utils::BuilderCommitment,
     vid::{VidCommitment, VidPrecomputeData},
 };
+use marketplace_builder_shared::block::ReceivedTransaction;
 use marketplace_builder_shared::block::{BlockId, BuilderStateId, ParentBlockReferences};
+use marketplace_builder_shared::utils::LegacyCommit;
 
 use committable::Commitment;
 
-use crate::{
-    service::{GlobalState, ReceivedTransaction},
-    LegacyCommit,
-};
 use async_broadcast::broadcast;
 use async_broadcast::Receiver as BroadcastReceiver;
 use async_broadcast::Sender as BroadcastSender;
@@ -42,13 +40,6 @@ use std::{collections::hash_map::Entry, time::Duration};
 
 pub type TxTimeStamp = u128;
 
-/// Enum to hold the different sources of the transaction
-#[derive(Clone, Debug, PartialEq)]
-pub enum TransactionSource {
-    External, // txn from the external source i.e private mempool
-    HotShot,  // txn from the HotShot network i.e public mempool
-}
-
 /// Decide Message to be put on the decide channel
 #[derive(Clone, Debug)]
 pub struct DecideMessage<Types: NodeType> {
@@ -119,892 +110,892 @@ pub struct DAProposalInfo<Types: NodeType> {
 /// of them, following the proposal that contains transactions.
pub(crate) const ALLOW_EMPTY_BLOCK_PERIOD: u64 = 3; -#[derive(Debug)] -pub struct BuilderState { - /// Recent included txs set while building blocks - pub included_txns: HashSet>, - - /// Old txs to be garbage collected - pub included_txns_old: HashSet>, - - /// Expiring txs to be garbage collected - pub included_txns_expiring: HashSet>, - - /// txns currently in the `tx_queue` - pub txns_in_queue: HashSet>, - - /// filtered queue of available transactions, taken from `tx_receiver` - pub tx_queue: VecDeque>>, - - /// `da_proposal_payload_commit` to (`da_proposal`, `node_count`) - #[allow(clippy::type_complexity)] - pub da_proposal_payload_commit_to_da_proposal: - HashMap<(BuilderCommitment, Types::View), DAProposalInfo>, - - /// `quorum_proposal_payload_commit` to `quorum_proposal` - #[allow(clippy::type_complexity)] - pub quorum_proposal_payload_commit_to_quorum_proposal: - HashMap<(BuilderCommitment, Types::View), Arc>>>, - - /// Spawned-from references to the parent block. - pub parent_block_references: ParentBlockReferences, - - // Channel Receivers for the HotShot events, Tx_receiver could also receive the external transactions - /// decide receiver - pub decide_receiver: BroadcastReceiver>, - - /// da proposal receiver - pub da_proposal_receiver: BroadcastReceiver>, - - /// quorum proposal receiver - pub quorum_proposal_receiver: BroadcastReceiver>, - - /// channel receiver for the block requests - pub req_receiver: BroadcastReceiver>, - - /// incoming stream of transactions - pub tx_receiver: BroadcastReceiver>>, - - /// global state handle, defined in the service.rs - pub global_state: Arc>>, - - /// locally spawned builder Commitements - pub builder_commitments: HashSet<(BuilderStateId, BuilderCommitment)>, - - /// timeout for maximising the txns in the block - pub maximize_txn_capture_timeout: Duration, - - /// constant fee that the builder will offer per byte of data sequenced - pub base_fee: u64, - - /// validated state that is required for a proposal to be considered valid. Needed for the - /// purposes of building a valid block payload within the sequencer. - pub validated_state: Arc, - - /// instance state to enfoce `max_block_size` - pub instance_state: Arc, - - /// txn garbage collection every duration time - pub txn_garbage_collect_duration: Duration, - - /// time of next garbage collection for txns - pub next_txn_garbage_collect_time: Instant, - - /// `allow_empty_block_until` is a variable that dictates the time until which - /// a builder should stop producing empty blocks. This is done specifically - /// to allow for faster finalization of previous blocks that have had - /// transactions included in them. - pub allow_empty_block_until: Option, -} - -/// [`best_builder_states_to_extend`] is a utility function that is used to -/// in order to determine which [`BuilderState`]s are the best fit to extend -/// from. -/// -/// This function is designed to inspect the current state of the global state -/// in order to determine which [`BuilderState`]s are the best fit to extend -/// from. We only want to use information from [`GlobalState`] as otherwise -/// we would have some insider knowledge unique to our specific [`BuilderState`] -/// rather than knowledge that is available to all [`BuilderState`]s. In fact, -/// in order to ensure this, this function lives outside of the [`BuilderState`] -/// itself. 
-/// -/// In an ideal circumstance the best [`BuilderState`] to extend from is going to -/// be the one that is immediately preceding the [`QuorumProposal`] that we are -/// attempting to extend from. However, if all we know is the view number of -/// the [`QuorumProposal`] that we are attempting to extend from, then we may end -/// up in a scenario where we have multiple [`BuilderState`]s that are all equally -/// valid to extend from. When this happens, we have the potential for a data -/// race. -/// -/// The primary cause of this has to due with the interface of the -/// [`ProxyGlobalState`](crate::service::ProxyGlobalState)'s API. In general, -/// we want to be able to retrieve a [`BuilderState`] via the [`BuilderStateId`]. -/// The [`BuilderStateId`] only references a [`ViewNumber`](hotshot_types::data::ViewNumber) -/// and a [`VidCommitment`] While this information is available in the [`QuorumProposal`], -/// it only helps us to rule out [`BuilderState`]s that already exist. -/// It does **NOT** help us to pick a [`BuilderState`] that is the best fit to extend from. -/// -/// This is where the `justify_qc` comes in to consideration. The `justify_qc` -/// contains the previous [`ViewNumber`](hotshot_types::data::ViewNumber) that is -/// being extended from, and in addition it also contains the previous [`Commitment>`] -/// that is being built on top of. Since our [`BuilderState`]s store identifying -/// information that contains this same `leaf_commit` we can compare these -/// directly to ensure that we are extending from the correct [`BuilderState`]. -/// -/// This function determines the best [`BuilderState`] in the following steps: -/// -/// 1. If we have a [`BuilderState`] that is already spawned for the current -/// [`QuorumProposal`], then we should should return no states, as one already -/// exists. This will prevent us from attempting to spawn duplicate -/// [`BuilderState`]s. -/// 2. Attempt to find all [`BuilderState`]s that are recorded within -/// [`GlobalState`] that have matching view number and leaf commitments. There -/// *should* only be one of these. But all would be valid extension points. -/// 3. If we can't find any [`BuilderState`]s that match the view number -/// and leaf commitment, then we should return for the maximum stored view -/// number that is smaller than the current [`QuorumProposal`]. -/// 4. If there is is only one [`BuilderState`] stored in the [`GlobalState`], then -/// we should return that [`BuilderState`] as the best fit. -/// 5. If none of the other criteria match, we return an empty result as it is -/// unclear what to do in this case. -/// -/// > Note: Any time this function returns more than a single entry in its -/// > [HashSet] result, there is a potential for a race condition. This is -/// > because there are multiple [BuilderState]s that are equally valid to -/// > extend from. This race could be avoided by just picking one of the -/// > entries in the resulting [HashSet], but this is not done here in order -/// > to allow us to highlight the possibility of the race. 
-async fn best_builder_states_to_extend( - quorum_proposal: Arc>>, - global_state: Arc>>, -) -> HashSet> { - let current_view_number = quorum_proposal.data.view_number; - let current_commitment = quorum_proposal.data.block_header.payload_commitment(); - let current_builder_state_id = BuilderStateId:: { - parent_commitment: current_commitment, - parent_view: current_view_number, - }; - - let global_state_read_lock = global_state.read_arc().await; - - // The first step is to check if we already have a spawned [BuilderState]. - // If we do, then we should indicate that there is no best fit, as we - // don't want to spawn another [BuilderState]. - if global_state_read_lock - .spawned_builder_states - .contains_key(¤t_builder_state_id) - { - // We already have a spawned [BuilderState] for this proposal. - // So we should just ignore it. - return HashSet::new(); - } - - // Next we want to see if there is an immediate match for a [BuilderState] - // that we can extend from. This is the most ideal situation, as it - // implies that we are extending from the correct [BuilderState]. - // We do this by checking the `justify_qc` stored within the - // [QuorumProposal], and checking it against the current spawned - // [BuilderState]s - let justify_qc = &quorum_proposal.data.justify_qc; - let existing_states: HashSet<_> = global_state_read_lock - .spawned_builder_states - .iter() - .filter( - |(_, (parent_block_references, _))| match parent_block_references { - None => false, - Some(parent_block_references) => { - parent_block_references.leaf_commit == justify_qc.data.leaf_commit - && parent_block_references.view_number == justify_qc.view_number - } - }, - ) - .map(|(builder_state_id, _)| builder_state_id.clone()) - .collect(); - - // If we found any matching [BuilderState]s, then we should return them - // as the best fit. - if !existing_states.is_empty() { - return existing_states; - } - - // At this point, we don't have any "ideal" matches or scenarios. So we - // need to look for a suitable fall-back. The best fallback condition to - // start with is any [BuilderState] that has the maximum spawned view - // number whose value is smaller than the current [QuorumProposal]. - let maximum_stored_view_number_smaller_than_quorum_proposal = global_state_read_lock - .spawned_builder_states - .keys() - .map(|builder_state_id| *builder_state_id.parent_view) - .filter(|view_number| view_number < ¤t_view_number) - .max(); - - // If we have a maximum view number that meets our criteria, then we should - // return all [BuilderStateId]s that match this view number. - // This can lead to multiple [BuilderStateId]s being returned. - if let Some(maximum_stored_view_number_smaller_than_quorum_proposal) = - maximum_stored_view_number_smaller_than_quorum_proposal - { - // If we are the maximum stored view number smaller than the quorum - // proposal's view number, then we are the best fit. - let mut result = HashSet::new(); - for builder_state_id in - global_state_read_lock - .spawned_builder_states - .keys() - .filter(|builder_state_id| { - builder_state_id.parent_view.u64() - == maximum_stored_view_number_smaller_than_quorum_proposal - }) - { - result.insert(builder_state_id.clone()); - } - return result; - } - - // This is our last ditch effort to continue making progress. If there is - // only one [BuilderState] active, then we should return that as the best - // fit, as it will be the only way we can continue making progress with - // the builder. 
- if global_state_read_lock.spawned_builder_states.len() == 1 { - let mut result = HashSet::new(); - for builder_state_id in global_state_read_lock.spawned_builder_states.keys() { - result.insert(builder_state_id.clone()); - } - return result; - } - - // This implies that there are only larger [BuilderState]s active than - // the one we are. This is weird, it implies that some sort of time - // travel has occurred view-wise. It is unclear what to do in this - // situation. - - HashSet::new() -} - -impl BuilderState { - /// Utility method that attempts to determine whether - /// we are among the best [`BuilderState`]s to extend from. - async fn am_i_the_best_builder_state_to_extend( - &self, - quorum_proposal: Arc>>, - ) -> bool { - let best_builder_states_to_extend = - best_builder_states_to_extend(quorum_proposal.clone(), self.global_state.clone()).await; - - tracing::debug!( - "{}@{} thinks these are the best builder states to extend from: {:?} for proposal {}@{}", - self.parent_block_references.vid_commitment, - self.parent_block_references.view_number.u64(), - best_builder_states_to_extend - .iter() - .map(|builder_state_id| format!( - "{}@{}", - builder_state_id.parent_commitment, - builder_state_id.parent_view.u64() - )) - .collect::>(), - quorum_proposal.data.block_header.payload_commitment(), - quorum_proposal.data.view_number.u64(), - ); - - // We are a best fit if we are contained within the returned set of - // best [BuilderState]s to extend from. - best_builder_states_to_extend.contains(&BuilderStateId { - parent_commitment: self.parent_block_references.vid_commitment, - parent_view: self.parent_block_references.view_number, - }) - } - - /// processing the DA proposal - #[tracing::instrument(skip_all, name = "process da proposal", - fields(builder_parent_block_references = %self.parent_block_references))] - async fn process_da_proposal(&mut self, da_msg: DaProposalMessage) { - tracing::debug!( - "Builder Received DA message for view {:?}", - da_msg.proposal.data.view_number - ); - - // we do not have the option to ignore DA proposals if we want to be able to handle failed view reorgs. - - // If the respective builder state exists to handle the request - let proposal = da_msg.proposal.clone(); - - // get the view number and encoded txns from the da_proposal_data - let view_number = proposal.data.view_number; - let encoded_txns = &proposal.data.encoded_transactions; - - let metadata = &proposal.data.metadata; - - // form a block payload from the encoded transactions - let block_payload = - >::from_bytes(encoded_txns, metadata); - // get the builder commitment from the block payload - let payload_builder_commitment = block_payload.builder_commitment(metadata); - - tracing::debug!( - "Extracted builder commitment from the da proposal: {:?}", - payload_builder_commitment - ); - - // form the DA proposal info - let da_proposal_info = DAProposalInfo { - view_number, - proposal, - }; - - let std::collections::hash_map::Entry::Vacant(e) = self - .da_proposal_payload_commit_to_da_proposal - .entry((payload_builder_commitment.clone(), view_number)) - else { - tracing::debug!("Payload commitment already exists in the da_proposal_payload_commit_to_da_proposal hashmap, so ignoring it"); - return; - }; - - // if we have matching da and quorum proposals, we can skip storing the one, and remove - // the other from storage, and call build_block with both, to save a little space. 
- - let Entry::Occupied(quorum_proposal) = self - .quorum_proposal_payload_commit_to_quorum_proposal - .entry((payload_builder_commitment.clone(), view_number)) - else { - e.insert(da_proposal_info); - return; - }; - - let quorum_proposal = quorum_proposal.remove(); - - // if we have a matching quorum proposal - // if (this is the correct parent or - // (the correct parent is missing and this is the highest view)) - // spawn a clone - if quorum_proposal.data.view_number != view_number { - tracing::debug!("Not spawning a clone despite matching DA and quorum payload commitments, as they corresponds to different view numbers"); - return; - } - - tracing::info!( - "Spawning a clone from process DA proposal for view number: {:?}", - view_number - ); - // remove this entry from quorum_proposal_payload_commit_to_quorum_proposal - self.quorum_proposal_payload_commit_to_quorum_proposal - .remove(&(payload_builder_commitment.clone(), view_number)); - self.spawn_clone_that_extends_self(da_proposal_info, quorum_proposal.clone()) - .await; - } - - /// processing the quorum proposal - //#[tracing::instrument(skip_all, name = "Process Quorum Proposal")] - #[tracing::instrument(skip_all, name = "process quorum proposal", - fields(builder_parent_block_references = %self.parent_block_references))] - async fn process_quorum_proposal(&mut self, quorum_msg: QuorumProposalMessage) { - tracing::debug!( - "Builder Received Quorum proposal message for view {:?}", - quorum_msg.proposal.data.view_number - ); - - // Two cases to handle: - // Case 1: Bootstrapping phase - // Case 2: No intended builder state exist - // To handle both cases, we can have the highest view number builder state running - // and only doing the insertion if and only if intended builder state for a particulat view is not present - // check the presence of quorum_proposal.data.view_number-1 in the spawned_builder_states list - let quorum_proposal = &quorum_msg.proposal; - let view_number = quorum_proposal.data.view_number; - let payload_builder_commitment = quorum_proposal.data.block_header.builder_commitment(); - - tracing::debug!( - "Extracted payload builder commitment from the quorum proposal: {:?}", - payload_builder_commitment - ); - - let std::collections::hash_map::Entry::Vacant(e) = self - .quorum_proposal_payload_commit_to_quorum_proposal - .entry((payload_builder_commitment.clone(), view_number)) - else { - tracing::debug!("Payload commitment already exists in the quorum_proposal_payload_commit_to_quorum_proposal hashmap, so ignoring it"); - return; - }; - - // first check whether vid_commitment exists in the quorum_proposal_payload_commit_to_quorum_proposal hashmap, if yer, ignore it, otherwise validate it and later insert in - // if we have matching da and quorum proposals, we can skip storing the one, and remove the other from storage, and call build_block with both, to save a little space. 
- let Entry::Occupied(da_proposal) = self - .da_proposal_payload_commit_to_da_proposal - .entry((payload_builder_commitment.clone(), view_number)) - else { - e.insert(quorum_proposal.clone()); - return; - }; - - let da_proposal_info = da_proposal.remove(); - // remove the entry from the da_proposal_payload_commit_to_da_proposal hashmap - self.da_proposal_payload_commit_to_da_proposal - .remove(&(payload_builder_commitment.clone(), view_number)); - - // also make sure we clone for the same view number( check incase payload commitments are same) - if da_proposal_info.view_number != view_number { - tracing::debug!("Not spawning a clone despite matching DA and quorum payload commitments, as they corresponds to different view numbers"); - return; - } - - tracing::info!( - "Spawning a clone from process quorum proposal for view number: {:?}", - view_number - ); - - self.spawn_clone_that_extends_self(da_proposal_info, quorum_proposal.clone()) - .await; - } - - /// A helper function that is used by both [`BuilderState::process_da_proposal`] - /// and [`BuilderState::process_quorum_proposal`] to spawn a new [`BuilderState`] - /// that extends from the current [`BuilderState`]. - /// - /// This helper function also adds additional checks in order to ensure - /// that the [`BuilderState`] that is being spawned is the best fit for the - /// [`QuorumProposal`] that is being extended from. - async fn spawn_clone_that_extends_self( - &mut self, - da_proposal_info: DAProposalInfo, - quorum_proposal: Arc>>, - ) { - if !self - .am_i_the_best_builder_state_to_extend(quorum_proposal.clone()) - .await - { - tracing::debug!( - "{} is not the best fit for forking, {}@{}, so ignoring the quorum proposal, and leaving it to another BuilderState", - self.parent_block_references, - quorum_proposal.data.block_header.payload_commitment(), - quorum_proposal.data.view_number.u64(), - ); - return; - } - - let (req_sender, req_receiver) = broadcast(self.req_receiver.capacity()); - - tracing::debug!( - "extending BuilderState with a clone from {} with new proposal {}@{}", - self.parent_block_references, - quorum_proposal.data.block_header.payload_commitment(), - quorum_proposal.data.view_number.u64() - ); - // We literally fork ourselves - self.clone_with_receiver(req_receiver) - .spawn_clone(da_proposal_info, quorum_proposal.clone(), req_sender) - .await; - } - - /// processing the decide event - #[tracing::instrument(skip_all, name = "process decide event", - fields(builder_parent_block_references = %self.parent_block_references))] - async fn process_decide_event(&mut self, decide_msg: DecideMessage) -> Option { - // Exit out all the builder states if their parent_block_references.view_number is less than the latest_decide_view_number - // The only exception is that we want to keep the highest view number builder state active to ensure that - // we have a builder state to handle the incoming DA and quorum proposals - let decide_view_number = decide_msg.latest_decide_view_number; - - let retained_view_cutoff = self - .global_state - .write_arc() - .await - .remove_handles(decide_view_number); - if self.parent_block_references.view_number < retained_view_cutoff { - tracing::info!( - "Decide@{:?}; Task@{:?} exiting; views < {:?} being reclaimed", - decide_view_number.u64(), - self.parent_block_references.view_number.u64(), - retained_view_cutoff.u64(), - ); - return Some(Status::ShouldExit); - } - tracing::info!( - "Decide@{:?}; Task@{:?} not exiting; views >= {:?} being retained", - decide_view_number.u64(), - 
self.parent_block_references.view_number.u64(), - retained_view_cutoff.u64(), - ); - - Some(Status::ShouldContinue) - } - - // spawn a clone of the builder state - #[tracing::instrument(skip_all, name = "spawn_clone", - fields(builder_parent_block_references = %self.parent_block_references))] - async fn spawn_clone( - mut self, - da_proposal_info: DAProposalInfo, - quorum_proposal: Arc>>, - req_sender: BroadcastSender>, - ) { - let leaf = Leaf::from_quorum_proposal(&quorum_proposal.data); - - // We replace our parent_block_references with information from the - // quorum proposal. This is identifying the block that this specific - // instance of [BuilderState] is attempting to build for. - self.parent_block_references = ParentBlockReferences { - view_number: quorum_proposal.data.view_number, - vid_commitment: quorum_proposal.data.block_header.payload_commitment(), - leaf_commit: leaf.legacy_commit(), - builder_commitment: quorum_proposal.data.block_header.builder_commitment(), - }; - - let builder_state_id = BuilderStateId { - parent_commitment: self.parent_block_references.vid_commitment, - parent_view: self.parent_block_references.view_number, - }; - - { - // Let's ensure that we don't already have one of these BuilderStates - // running already. - - let global_state_read_lock = self.global_state.read_arc().await; - if global_state_read_lock - .spawned_builder_states - .contains_key(&builder_state_id) - { - tracing::warn!( - "Aborting spawn_clone, builder state already exists in spawned_builder_states: {:?}", - builder_state_id - ); - return; - } - } - - let encoded_txns = &da_proposal_info.proposal.data.encoded_transactions; - let metadata = &da_proposal_info.proposal.data.metadata; - - let block_payload = - >::from_bytes(encoded_txns, metadata); - let txn_commitments = block_payload.transaction_commitments(metadata); - - for tx in txn_commitments.iter() { - self.txns_in_queue.remove(tx); - } - - self.included_txns.extend(txn_commitments.iter()); - self.tx_queue - .retain(|tx| self.txns_in_queue.contains(&tx.commit)); - - if !txn_commitments.is_empty() { - self.allow_empty_block_until = Some(Types::View::new( - da_proposal_info.view_number.u64() + ALLOW_EMPTY_BLOCK_PERIOD, - )); - } - - // register the spawned builder state to spawned_builder_states in the global state - self.global_state.write_arc().await.register_builder_state( - BuilderStateId { - parent_commitment: self.parent_block_references.vid_commitment, - parent_view: self.parent_block_references.view_number, - }, - self.parent_block_references.clone(), - req_sender, - ); - - self.event_loop(); - } - - // build a block - #[tracing::instrument(skip_all, name = "build block", - fields(builder_parent_block_references = %self.parent_block_references))] - async fn build_block( - &mut self, - state_id: BuilderStateId, - ) -> Option> { - let timeout_after = Instant::now() + self.maximize_txn_capture_timeout; - let sleep_interval = self.maximize_txn_capture_timeout / 10; - while Instant::now() <= timeout_after { - self.collect_txns(timeout_after).await; - - if !self.tx_queue.is_empty() // we have transactions - || Instant::now() + sleep_interval > timeout_after - // we don't have time for another iteration - { - break; - } - - async_sleep(sleep_interval).await - } - - // should_prioritize_finalization is a flag that is used to determine - // whether we should return empty blocks or not. 
- - let should_prioritize_finalization = self - .allow_empty_block_until - .map(|until| state_id.parent_view < until) - .unwrap_or(false); - - if self.tx_queue.is_empty() && !should_prioritize_finalization { - // Don't build an empty block - return None; - } - - let max_block_size = self - .global_state - .read_arc() - .await - .block_size_limits - .max_block_size; - let transactions_to_include = self.tx_queue.iter().scan(0, |total_size, tx| { - let prev_size = *total_size; - *total_size += tx.len; - // We will include one transaction over our target block length - // if it's the first transaction in queue, otherwise we'd have a possible failure - // state where a single transaction larger than target block state is stuck in - // queue and we just build empty blocks forever - if *total_size >= max_block_size && prev_size != 0 { - None - } else { - Some(tx.tx.clone()) - } - }); - - let Ok((payload, metadata)) = - >::from_transactions( - transactions_to_include, - &self.validated_state, - &self.instance_state, - ) - .await - else { - tracing::warn!("build block, returning None"); - return None; - }; - - let builder_hash = payload.builder_commitment(&metadata); - // count the number of txns - let actual_txn_count = payload.num_transactions(&metadata); - - // Payload is empty despite us checking that tx_queue isn't empty earlier. - // - // This means that the block was truncated due to *sequencer* block length - // limits, which are different from our `max_block_size`. There's no good way - // for us to check for this in advance, so we detect transactions too big for - // the sequencer indirectly, by observing that we passed some transactions - // to `>::from_transactions`, but - // it returned an empty block. - // Thus we deduce that the first transaction in our queue is too big to *ever* - // be included, because it alone goes over sequencer's block size limit. - // We need to drop it and mark as "included" so that if we receive - // it again we don't even bother with it. - if actual_txn_count == 0 && !should_prioritize_finalization { - if let Some(txn) = self.tx_queue.pop_front() { - self.txns_in_queue.remove(&txn.commit); - self.included_txns.insert(txn.commit); - }; - return None; - } - - // insert the recently built block into the builder commitments - self.builder_commitments - .insert((state_id, builder_hash.clone())); - - let encoded_txns: Vec = payload.encode().to_vec(); - let block_size: u64 = encoded_txns.len() as u64; - let offered_fee: u64 = self.base_fee * block_size; - - // Get the number of nodes stored while processing the `claim_block_with_num_nodes` request - // or upon initialization. 
- let num_nodes = self.global_state.read_arc().await.num_nodes; - - let (trigger_send, trigger_recv) = oneshot(); - - // spawn a task to calculate the VID commitment, and pass the handle to the global state - // later global state can await on it before replying to the proposer - let (unbounded_sender, unbounded_receiver) = unbounded(); - #[allow(unused_must_use)] - async_spawn(async move { - let Ok(TriggerStatus::Start) = trigger_recv.recv().await else { - return; - }; - - let join_handle = - spawn_blocking(move || precompute_vid_commitment(&encoded_txns, num_nodes)); - #[cfg(async_executor_impl = "tokio")] - let (vidc, pre_compute_data) = join_handle.await.unwrap(); - #[cfg(async_executor_impl = "async-std")] - let (vidc, pre_compute_data) = join_handle.await; - unbounded_sender.send((vidc, pre_compute_data)).await; - }); - - tracing::info!( - "Builder view num {:?}, building block with {:?} txns, with builder hash {:?}", - self.parent_block_references.view_number, - actual_txn_count, - builder_hash - ); - - Some(BuildBlockInfo { - id: BlockId { - view: self.parent_block_references.view_number, - hash: builder_hash, - }, - block_size, - offered_fee, - block_payload: payload, - metadata, - vid_trigger: trigger_send, - vid_receiver: unbounded_receiver, - truncated: actual_txn_count < self.tx_queue.len(), - }) - } - - async fn process_block_request(&mut self, req: RequestMessage) { - // If a spawned clone is active then it will handle the request, otherwise the highest view num builder will handle - if req.state_id.parent_commitment != self.parent_block_references.vid_commitment - || req.state_id.parent_view != self.parent_block_references.view_number - { - tracing::debug!( - "Builder {:?} Requested Builder commitment does not match the built_from_view, so ignoring it", - self.parent_block_references.view_number - ); - return; - } - - let highest_view_num_builder_id = self - .global_state - .read_arc() - .await - .highest_view_num_builder_id - .clone(); - - if highest_view_num_builder_id.parent_view != self.parent_block_references.view_number { - tracing::debug!( - "Builder {:?} Requested Builder commitment does not match the highest_view_num_builder_id, so ignoring it", - self.parent_block_references.view_number - ); - return; - } - - tracing::info!( - "Request for parent {} handled by builder with view {:?}", - req.state_id, - self.parent_block_references.view_number, - ); - let response = self.build_block(req.state_id.clone()).await; - - let Some(response) = response else { - tracing::debug!("No response to send"); - return; - }; - - // form the response message - let response_msg = ResponseMessage { - builder_hash: response.id.hash.clone(), - block_size: response.block_size, - offered_fee: response.offered_fee, - }; - - let builder_hash = response.id.hash.clone(); - self.global_state.write_arc().await.update_global_state( - req.state_id.clone(), - response, - response_msg.clone(), - ); - - // ... 
and finally, send the response - if let Err(e) = req.response_channel.send(response_msg).await { - tracing::warn!( - "Builder {:?} failed to send response to {:?} with builder hash {:?}, Err: {:?}", - self.parent_block_references.view_number, - req, - builder_hash, - e - ); - return; - } - - tracing::info!( - "Builder {:?} Sent response to the request{:?} with builder hash {:?}", - self.parent_block_references.view_number, - req, - builder_hash - ); - } - - #[tracing::instrument(skip_all, name = "event loop", - fields(builder_parent_block_references = %self.parent_block_references))] - pub fn event_loop(mut self) { - let _builder_handle = async_spawn(async move { - loop { - tracing::debug!( - "Builder {:?} event loop", - self.parent_block_references.view_number - ); - futures::select! { - req = self.req_receiver.next() => { - tracing::debug!("Received request msg in builder {:?}: {:?}", self.parent_block_references.view_number, req); - match req { - Some(req) => { - if let MessageType::RequestMessage(req) = req { - tracing::debug!( - "Received request msg in builder {:?}: {:?}", - self.parent_block_references.view_number, - req - ); - self.process_block_request(req).await; - } else { - tracing::warn!("Unexpected message on requests channel: {:?}", req); - } - } - None => { - tracing::warn!("No more request messages to consume"); - } - } - }, - da = self.da_proposal_receiver.next() => { - match da { - Some(da) => { - if let MessageType::DaProposalMessage(rda_msg) = da { - tracing::debug!("Received da proposal msg in builder {:?}:\n {:?}", self.parent_block_references, rda_msg.proposal.data.view_number); - self.process_da_proposal(rda_msg).await; - } else { - tracing::warn!("Unexpected message on da proposals channel: {:?}", da); - } - } - None => { - tracing::warn!("No more da proposal messages to consume"); - } - } - }, - quorum = self.quorum_proposal_receiver.next() => { - match quorum { - Some(quorum) => { - if let MessageType::QuorumProposalMessage(rquorum_msg) = quorum { - tracing::debug!("Received quorum proposal msg in builder {:?}:\n {:?} for view ", self.parent_block_references, rquorum_msg.proposal.data.view_number); - self.process_quorum_proposal(rquorum_msg).await; - } else { - tracing::warn!("Unexpected message on quorum proposals channel: {:?}", quorum); - } - } - None => { - tracing::warn!("No more quorum proposal messages to consume"); - } - } - }, - decide = self.decide_receiver.next() => { - match decide { - Some(decide) => { - if let MessageType::DecideMessage(rdecide_msg) = decide { - let latest_decide_view_num = rdecide_msg.latest_decide_view_number; - tracing::debug!("Received decide msg view {:?} in builder {:?}", - &latest_decide_view_num, - self.parent_block_references); - let decide_status = self.process_decide_event(rdecide_msg).await; - match decide_status{ - Some(Status::ShouldExit) => { - tracing::info!("Exiting builder {:?} with decide view {:?}", - self.parent_block_references, - &latest_decide_view_num); - return; - } - Some(Status::ShouldContinue) => { - tracing::debug!("Continuing builder {:?}", - self.parent_block_references); - continue; - } - None => { - tracing::warn!("decide_status was None; Continuing builder {:?}", - self.parent_block_references); - continue; - } - } - } else { - tracing::warn!("Unexpected message on decide channel: {:?}", decide); - } - } - None => { - tracing::warn!("No more decide messages to consume"); - } - } - }, - }; - } - }); - } -} +// #[derive(Debug)] +// pub struct BuilderState { +// /// Recent included txs set while 
building blocks +// pub included_txns: HashSet>, +// +// /// Old txs to be garbage collected +// pub included_txns_old: HashSet>, +// +// /// Expiring txs to be garbage collected +// pub included_txns_expiring: HashSet>, +// +// /// txns currently in the `tx_queue` +// pub txns_in_queue: HashSet>, +// +// /// filtered queue of available transactions, taken from `tx_receiver` +// pub tx_queue: VecDeque>>, +// +// /// `da_proposal_payload_commit` to (`da_proposal`, `node_count`) +// #[allow(clippy::type_complexity)] +// pub da_proposal_payload_commit_to_da_proposal: +// HashMap<(BuilderCommitment, Types::View), DAProposalInfo>, +// +// /// `quorum_proposal_payload_commit` to `quorum_proposal` +// #[allow(clippy::type_complexity)] +// pub quorum_proposal_payload_commit_to_quorum_proposal: +// HashMap<(BuilderCommitment, Types::View), Arc>>>, +// +// /// Spawned-from references to the parent block. +// pub parent_block_references: ParentBlockReferences, +// +// // Channel Receivers for the HotShot events, Tx_receiver could also receive the external transactions +// /// decide receiver +// pub decide_receiver: BroadcastReceiver>, +// +// /// da proposal receiver +// pub da_proposal_receiver: BroadcastReceiver>, +// +// /// quorum proposal receiver +// pub quorum_proposal_receiver: BroadcastReceiver>, +// +// /// channel receiver for the block requests +// pub req_receiver: BroadcastReceiver>, +// +// /// incoming stream of transactions +// pub tx_receiver: BroadcastReceiver>>, +// +// /// global state handle, defined in the service.rs +// pub global_state: Arc>>, +// +// /// locally spawned builder Commitements +// pub builder_commitments: HashSet<(BuilderStateId, BuilderCommitment)>, +// +// /// timeout for maximising the txns in the block +// pub maximize_txn_capture_timeout: Duration, +// +// /// constant fee that the builder will offer per byte of data sequenced +// pub base_fee: u64, +// +// /// validated state that is required for a proposal to be considered valid. Needed for the +// /// purposes of building a valid block payload within the sequencer. +// pub validated_state: Arc, +// +// /// instance state to enfoce `max_block_size` +// pub instance_state: Arc, +// +// /// txn garbage collection every duration time +// pub txn_garbage_collect_duration: Duration, +// +// /// time of next garbage collection for txns +// pub next_txn_garbage_collect_time: Instant, +// +// /// `allow_empty_block_until` is a variable that dictates the time until which +// /// a builder should stop producing empty blocks. This is done specifically +// /// to allow for faster finalization of previous blocks that have had +// /// transactions included in them. +// pub allow_empty_block_until: Option, +// } +// +// /// [`best_builder_states_to_extend`] is a utility function that is used to +// /// in order to determine which [`BuilderState`]s are the best fit to extend +// /// from. +// /// +// /// This function is designed to inspect the current state of the global state +// /// in order to determine which [`BuilderState`]s are the best fit to extend +// /// from. We only want to use information from [`GlobalState`] as otherwise +// /// we would have some insider knowledge unique to our specific [`BuilderState`] +// /// rather than knowledge that is available to all [`BuilderState`]s. In fact, +// /// in order to ensure this, this function lives outside of the [`BuilderState`] +// /// itself. 
+// /// +// /// In an ideal circumstance the best [`BuilderState`] to extend from is going to +// /// be the one that is immediately preceding the [`QuorumProposal`] that we are +// /// attempting to extend from. However, if all we know is the view number of +// /// the [`QuorumProposal`] that we are attempting to extend from, then we may end +// /// up in a scenario where we have multiple [`BuilderState`]s that are all equally +// /// valid to extend from. When this happens, we have the potential for a data +// /// race. +// /// +// /// The primary cause of this has to due with the interface of the +// /// [`ProxyGlobalState`](crate::service::ProxyGlobalState)'s API. In general, +// /// we want to be able to retrieve a [`BuilderState`] via the [`BuilderStateId`]. +// /// The [`BuilderStateId`] only references a [`ViewNumber`](hotshot_types::data::ViewNumber) +// /// and a [`VidCommitment`] While this information is available in the [`QuorumProposal`], +// /// it only helps us to rule out [`BuilderState`]s that already exist. +// /// It does **NOT** help us to pick a [`BuilderState`] that is the best fit to extend from. +// /// +// /// This is where the `justify_qc` comes in to consideration. The `justify_qc` +// /// contains the previous [`ViewNumber`](hotshot_types::data::ViewNumber) that is +// /// being extended from, and in addition it also contains the previous [`Commitment>`] +// /// that is being built on top of. Since our [`BuilderState`]s store identifying +// /// information that contains this same `leaf_commit` we can compare these +// /// directly to ensure that we are extending from the correct [`BuilderState`]. +// /// +// /// This function determines the best [`BuilderState`] in the following steps: +// /// +// /// 1. If we have a [`BuilderState`] that is already spawned for the current +// /// [`QuorumProposal`], then we should should return no states, as one already +// /// exists. This will prevent us from attempting to spawn duplicate +// /// [`BuilderState`]s. +// /// 2. Attempt to find all [`BuilderState`]s that are recorded within +// /// [`GlobalState`] that have matching view number and leaf commitments. There +// /// *should* only be one of these. But all would be valid extension points. +// /// 3. If we can't find any [`BuilderState`]s that match the view number +// /// and leaf commitment, then we should return for the maximum stored view +// /// number that is smaller than the current [`QuorumProposal`]. +// /// 4. If there is is only one [`BuilderState`] stored in the [`GlobalState`], then +// /// we should return that [`BuilderState`] as the best fit. +// /// 5. If none of the other criteria match, we return an empty result as it is +// /// unclear what to do in this case. +// /// +// /// > Note: Any time this function returns more than a single entry in its +// /// > [HashSet] result, there is a potential for a race condition. This is +// /// > because there are multiple [BuilderState]s that are equally valid to +// /// > extend from. This race could be avoided by just picking one of the +// /// > entries in the resulting [HashSet], but this is not done here in order +// /// > to allow us to highlight the possibility of the race. 
+// async fn best_builder_states_to_extend( +// quorum_proposal: Arc>>, +// global_state: Arc>>, +// ) -> HashSet> { +// let current_view_number = quorum_proposal.data.view_number; +// let current_commitment = quorum_proposal.data.block_header.payload_commitment(); +// let current_builder_state_id = BuilderStateId:: { +// parent_commitment: current_commitment, +// parent_view: current_view_number, +// }; +// +// let global_state_read_lock = global_state.read_arc().await; +// +// // The first step is to check if we already have a spawned [BuilderState]. +// // If we do, then we should indicate that there is no best fit, as we +// // don't want to spawn another [BuilderState]. +// if global_state_read_lock +// .spawned_builder_states +// .contains_key(¤t_builder_state_id) +// { +// // We already have a spawned [BuilderState] for this proposal. +// // So we should just ignore it. +// return HashSet::new(); +// } +// +// // Next we want to see if there is an immediate match for a [BuilderState] +// // that we can extend from. This is the most ideal situation, as it +// // implies that we are extending from the correct [BuilderState]. +// // We do this by checking the `justify_qc` stored within the +// // [QuorumProposal], and checking it against the current spawned +// // [BuilderState]s +// let justify_qc = &quorum_proposal.data.justify_qc; +// let existing_states: HashSet<_> = global_state_read_lock +// .spawned_builder_states +// .iter() +// .filter( +// |(_, (parent_block_references, _))| match parent_block_references { +// None => false, +// Some(parent_block_references) => { +// parent_block_references.leaf_commit == justify_qc.data.leaf_commit +// && parent_block_references.view_number == justify_qc.view_number +// } +// }, +// ) +// .map(|(builder_state_id, _)| builder_state_id.clone()) +// .collect(); +// +// // If we found any matching [BuilderState]s, then we should return them +// // as the best fit. +// if !existing_states.is_empty() { +// return existing_states; +// } +// +// // At this point, we don't have any "ideal" matches or scenarios. So we +// // need to look for a suitable fall-back. The best fallback condition to +// // start with is any [BuilderState] that has the maximum spawned view +// // number whose value is smaller than the current [QuorumProposal]. +// let maximum_stored_view_number_smaller_than_quorum_proposal = global_state_read_lock +// .spawned_builder_states +// .keys() +// .map(|builder_state_id| *builder_state_id.parent_view) +// .filter(|view_number| view_number < ¤t_view_number) +// .max(); +// +// // If we have a maximum view number that meets our criteria, then we should +// // return all [BuilderStateId]s that match this view number. +// // This can lead to multiple [BuilderStateId]s being returned. +// if let Some(maximum_stored_view_number_smaller_than_quorum_proposal) = +// maximum_stored_view_number_smaller_than_quorum_proposal +// { +// // If we are the maximum stored view number smaller than the quorum +// // proposal's view number, then we are the best fit. +// let mut result = HashSet::new(); +// for builder_state_id in +// global_state_read_lock +// .spawned_builder_states +// .keys() +// .filter(|builder_state_id| { +// builder_state_id.parent_view.u64() +// == maximum_stored_view_number_smaller_than_quorum_proposal +// }) +// { +// result.insert(builder_state_id.clone()); +// } +// return result; +// } +// +// // This is our last ditch effort to continue making progress. 
If there is +// // only one [BuilderState] active, then we should return that as the best +// // fit, as it will be the only way we can continue making progress with +// // the builder. +// if global_state_read_lock.spawned_builder_states.len() == 1 { +// let mut result = HashSet::new(); +// for builder_state_id in global_state_read_lock.spawned_builder_states.keys() { +// result.insert(builder_state_id.clone()); +// } +// return result; +// } +// +// // This implies that there are only larger [BuilderState]s active than +// // the one we are. This is weird, it implies that some sort of time +// // travel has occurred view-wise. It is unclear what to do in this +// // situation. +// +// HashSet::new() +// } +// +// impl BuilderState { +// /// Utility method that attempts to determine whether +// /// we are among the best [`BuilderState`]s to extend from. +// async fn am_i_the_best_builder_state_to_extend( +// &self, +// quorum_proposal: Arc>>, +// ) -> bool { +// let best_builder_states_to_extend = +// best_builder_states_to_extend(quorum_proposal.clone(), self.global_state.clone()).await; +// +// tracing::debug!( +// "{}@{} thinks these are the best builder states to extend from: {:?} for proposal {}@{}", +// self.parent_block_references.vid_commitment, +// self.parent_block_references.view_number.u64(), +// best_builder_states_to_extend +// .iter() +// .map(|builder_state_id| format!( +// "{}@{}", +// builder_state_id.parent_commitment, +// builder_state_id.parent_view.u64() +// )) +// .collect::>(), +// quorum_proposal.data.block_header.payload_commitment(), +// quorum_proposal.data.view_number.u64(), +// ); +// +// // We are a best fit if we are contained within the returned set of +// // best [BuilderState]s to extend from. +// best_builder_states_to_extend.contains(&BuilderStateId { +// parent_commitment: self.parent_block_references.vid_commitment, +// parent_view: self.parent_block_references.view_number, +// }) +// } +// +// /// processing the DA proposal +// #[tracing::instrument(skip_all, name = "process da proposal", +// fields(builder_parent_block_references = %self.parent_block_references))] +// async fn process_da_proposal(&mut self, da_msg: DaProposalMessage) { +// tracing::debug!( +// "Builder Received DA message for view {:?}", +// da_msg.proposal.data.view_number +// ); +// +// // we do not have the option to ignore DA proposals if we want to be able to handle failed view reorgs. 
+// +// // If the respective builder state exists to handle the request +// let proposal = da_msg.proposal.clone(); +// +// // get the view number and encoded txns from the da_proposal_data +// let view_number = proposal.data.view_number; +// let encoded_txns = &proposal.data.encoded_transactions; +// +// let metadata = &proposal.data.metadata; +// +// // form a block payload from the encoded transactions +// let block_payload = +// >::from_bytes(encoded_txns, metadata); +// // get the builder commitment from the block payload +// let payload_builder_commitment = block_payload.builder_commitment(metadata); +// +// tracing::debug!( +// "Extracted builder commitment from the da proposal: {:?}", +// payload_builder_commitment +// ); +// +// // form the DA proposal info +// let da_proposal_info = DAProposalInfo { +// view_number, +// proposal, +// }; +// +// let std::collections::hash_map::Entry::Vacant(e) = self +// .da_proposal_payload_commit_to_da_proposal +// .entry((payload_builder_commitment.clone(), view_number)) +// else { +// tracing::debug!("Payload commitment already exists in the da_proposal_payload_commit_to_da_proposal hashmap, so ignoring it"); +// return; +// }; +// +// // if we have matching da and quorum proposals, we can skip storing the one, and remove +// // the other from storage, and call build_block with both, to save a little space. +// +// let Entry::Occupied(quorum_proposal) = self +// .quorum_proposal_payload_commit_to_quorum_proposal +// .entry((payload_builder_commitment.clone(), view_number)) +// else { +// e.insert(da_proposal_info); +// return; +// }; +// +// let quorum_proposal = quorum_proposal.remove(); +// +// // if we have a matching quorum proposal +// // if (this is the correct parent or +// // (the correct parent is missing and this is the highest view)) +// // spawn a clone +// if quorum_proposal.data.view_number != view_number { +// tracing::debug!("Not spawning a clone despite matching DA and quorum payload commitments, as they corresponds to different view numbers"); +// return; +// } +// +// tracing::info!( +// "Spawning a clone from process DA proposal for view number: {:?}", +// view_number +// ); +// // remove this entry from quorum_proposal_payload_commit_to_quorum_proposal +// self.quorum_proposal_payload_commit_to_quorum_proposal +// .remove(&(payload_builder_commitment.clone(), view_number)); +// self.spawn_clone_that_extends_self(da_proposal_info, quorum_proposal.clone()) +// .await; +// } +// +// /// processing the quorum proposal +// //#[tracing::instrument(skip_all, name = "Process Quorum Proposal")] +// #[tracing::instrument(skip_all, name = "process quorum proposal", +// fields(builder_parent_block_references = %self.parent_block_references))] +// async fn process_quorum_proposal(&mut self, quorum_msg: QuorumProposalMessage) { +// tracing::debug!( +// "Builder Received Quorum proposal message for view {:?}", +// quorum_msg.proposal.data.view_number +// ); +// +// // Two cases to handle: +// // Case 1: Bootstrapping phase +// // Case 2: No intended builder state exist +// // To handle both cases, we can have the highest view number builder state running +// // and only doing the insertion if and only if intended builder state for a particulat view is not present +// // check the presence of quorum_proposal.data.view_number-1 in the spawned_builder_states list +// let quorum_proposal = &quorum_msg.proposal; +// let view_number = quorum_proposal.data.view_number; +// let payload_builder_commitment = 
quorum_proposal.data.block_header.builder_commitment(); +// +// tracing::debug!( +// "Extracted payload builder commitment from the quorum proposal: {:?}", +// payload_builder_commitment +// ); +// +// let std::collections::hash_map::Entry::Vacant(e) = self +// .quorum_proposal_payload_commit_to_quorum_proposal +// .entry((payload_builder_commitment.clone(), view_number)) +// else { +// tracing::debug!("Payload commitment already exists in the quorum_proposal_payload_commit_to_quorum_proposal hashmap, so ignoring it"); +// return; +// }; +// +// // first check whether vid_commitment exists in the quorum_proposal_payload_commit_to_quorum_proposal hashmap, if yer, ignore it, otherwise validate it and later insert in +// // if we have matching da and quorum proposals, we can skip storing the one, and remove the other from storage, and call build_block with both, to save a little space. +// let Entry::Occupied(da_proposal) = self +// .da_proposal_payload_commit_to_da_proposal +// .entry((payload_builder_commitment.clone(), view_number)) +// else { +// e.insert(quorum_proposal.clone()); +// return; +// }; +// +// let da_proposal_info = da_proposal.remove(); +// // remove the entry from the da_proposal_payload_commit_to_da_proposal hashmap +// self.da_proposal_payload_commit_to_da_proposal +// .remove(&(payload_builder_commitment.clone(), view_number)); +// +// // also make sure we clone for the same view number( check incase payload commitments are same) +// if da_proposal_info.view_number != view_number { +// tracing::debug!("Not spawning a clone despite matching DA and quorum payload commitments, as they corresponds to different view numbers"); +// return; +// } +// +// tracing::info!( +// "Spawning a clone from process quorum proposal for view number: {:?}", +// view_number +// ); +// +// self.spawn_clone_that_extends_self(da_proposal_info, quorum_proposal.clone()) +// .await; +// } +// +// /// A helper function that is used by both [`BuilderState::process_da_proposal`] +// /// and [`BuilderState::process_quorum_proposal`] to spawn a new [`BuilderState`] +// /// that extends from the current [`BuilderState`]. +// /// +// /// This helper function also adds additional checks in order to ensure +// /// that the [`BuilderState`] that is being spawned is the best fit for the +// /// [`QuorumProposal`] that is being extended from. 
+// async fn spawn_clone_that_extends_self( +// &mut self, +// da_proposal_info: DAProposalInfo, +// quorum_proposal: Arc>>, +// ) { +// if !self +// .am_i_the_best_builder_state_to_extend(quorum_proposal.clone()) +// .await +// { +// tracing::debug!( +// "{} is not the best fit for forking, {}@{}, so ignoring the quorum proposal, and leaving it to another BuilderState", +// self.parent_block_references, +// quorum_proposal.data.block_header.payload_commitment(), +// quorum_proposal.data.view_number.u64(), +// ); +// return; +// } +// +// let (req_sender, req_receiver) = broadcast(self.req_receiver.capacity()); +// +// tracing::debug!( +// "extending BuilderState with a clone from {} with new proposal {}@{}", +// self.parent_block_references, +// quorum_proposal.data.block_header.payload_commitment(), +// quorum_proposal.data.view_number.u64() +// ); +// // We literally fork ourselves +// self.clone_with_receiver(req_receiver) +// .spawn_clone(da_proposal_info, quorum_proposal.clone(), req_sender) +// .await; +// } +// +// /// processing the decide event +// #[tracing::instrument(skip_all, name = "process decide event", +// fields(builder_parent_block_references = %self.parent_block_references))] +// async fn process_decide_event(&mut self, decide_msg: DecideMessage) -> Option { +// // Exit out all the builder states if their parent_block_references.view_number is less than the latest_decide_view_number +// // The only exception is that we want to keep the highest view number builder state active to ensure that +// // we have a builder state to handle the incoming DA and quorum proposals +// let decide_view_number = decide_msg.latest_decide_view_number; +// +// let retained_view_cutoff = self +// .global_state +// .write_arc() +// .await +// .remove_handles(decide_view_number); +// if self.parent_block_references.view_number < retained_view_cutoff { +// tracing::info!( +// "Decide@{:?}; Task@{:?} exiting; views < {:?} being reclaimed", +// decide_view_number.u64(), +// self.parent_block_references.view_number.u64(), +// retained_view_cutoff.u64(), +// ); +// return Some(Status::ShouldExit); +// } +// tracing::info!( +// "Decide@{:?}; Task@{:?} not exiting; views >= {:?} being retained", +// decide_view_number.u64(), +// self.parent_block_references.view_number.u64(), +// retained_view_cutoff.u64(), +// ); +// +// Some(Status::ShouldContinue) +// } +// +// // spawn a clone of the builder state +// #[tracing::instrument(skip_all, name = "spawn_clone", +// fields(builder_parent_block_references = %self.parent_block_references))] +// async fn spawn_clone( +// mut self, +// da_proposal_info: DAProposalInfo, +// quorum_proposal: Arc>>, +// req_sender: BroadcastSender>, +// ) { +// let leaf = Leaf::from_quorum_proposal(&quorum_proposal.data); +// +// // We replace our parent_block_references with information from the +// // quorum proposal. This is identifying the block that this specific +// // instance of [BuilderState] is attempting to build for. 
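// ---------------------------------------------------------------------------
// Illustrative sketch (not from the diff): the decide-handling policy from
// `process_decide_event` above, with plain u64s standing in for view numbers.
// The cutoff derived on decide never exceeds the highest spawned view, so at
// least one builder state always survives to handle incoming proposals.
fn retained_view_cutoff(highest_spawned_view: u64, decide_view: u64) -> u64 {
    std::cmp::min(highest_spawned_view, decide_view)
}

fn should_exit(parent_view: u64, cutoff: u64) -> bool {
    // tasks building on views before the cutoff are reclaimed
    parent_view < cutoff
}

fn main() {
    let cutoff = retained_view_cutoff(10, 7);
    assert_eq!(cutoff, 7);
    assert!(should_exit(6, cutoff)); // views below the cutoff exit
    assert!(!should_exit(7, cutoff)); // the cutoff view itself is retained
}
// ---------------------------------------------------------------------------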
+// self.parent_block_references = ParentBlockReferences { +// view_number: quorum_proposal.data.view_number, +// vid_commitment: quorum_proposal.data.block_header.payload_commitment(), +// leaf_commit: leaf.legacy_commit(), +// builder_commitment: quorum_proposal.data.block_header.builder_commitment(), +// }; +// +// let builder_state_id = BuilderStateId { +// parent_commitment: self.parent_block_references.vid_commitment, +// parent_view: self.parent_block_references.view_number, +// }; +// +// { +// // Let's ensure that we don't already have one of these BuilderStates +// // running already. +// +// let global_state_read_lock = self.global_state.read_arc().await; +// if global_state_read_lock +// .spawned_builder_states +// .contains_key(&builder_state_id) +// { +// tracing::warn!( +// "Aborting spawn_clone, builder state already exists in spawned_builder_states: {:?}", +// builder_state_id +// ); +// return; +// } +// } +// +// let encoded_txns = &da_proposal_info.proposal.data.encoded_transactions; +// let metadata = &da_proposal_info.proposal.data.metadata; +// +// let block_payload = +// >::from_bytes(encoded_txns, metadata); +// let txn_commitments = block_payload.transaction_commitments(metadata); +// +// for tx in txn_commitments.iter() { +// self.txns_in_queue.remove(tx); +// } +// +// self.included_txns.extend(txn_commitments.iter()); +// self.tx_queue +// .retain(|tx| self.txns_in_queue.contains(&tx.commit)); +// +// if !txn_commitments.is_empty() { +// self.allow_empty_block_until = Some(Types::View::new( +// da_proposal_info.view_number.u64() + ALLOW_EMPTY_BLOCK_PERIOD, +// )); +// } +// +// // register the spawned builder state to spawned_builder_states in the global state +// self.global_state.write_arc().await.register_builder_state( +// BuilderStateId { +// parent_commitment: self.parent_block_references.vid_commitment, +// parent_view: self.parent_block_references.view_number, +// }, +// self.parent_block_references.clone(), +// req_sender, +// ); +// +// self.event_loop(); +// } +// +// // build a block +// #[tracing::instrument(skip_all, name = "build block", +// fields(builder_parent_block_references = %self.parent_block_references))] +// async fn build_block( +// &mut self, +// state_id: BuilderStateId, +// ) -> Option> { +// let timeout_after = Instant::now() + self.maximize_txn_capture_timeout; +// let sleep_interval = self.maximize_txn_capture_timeout / 10; +// while Instant::now() <= timeout_after { +// self.collect_txns(timeout_after).await; +// +// if !self.tx_queue.is_empty() // we have transactions +// || Instant::now() + sleep_interval > timeout_after +// // we don't have time for another iteration +// { +// break; +// } +// +// async_sleep(sleep_interval).await +// } +// +// // should_prioritize_finalization is a flag that is used to determine +// // whether we should return empty blocks or not. 
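// ---------------------------------------------------------------------------
// Illustrative sketch (not from the diff): the empty-block window behind the
// `allow_empty_block_until` field set above. The period value below is an
// arbitrary placeholder, not the crate's actual ALLOW_EMPTY_BLOCK_PERIOD.
const ALLOW_EMPTY_BLOCK_PERIOD: u64 = 3;

fn should_prioritize_finalization(allow_empty_until: Option<u64>, parent_view: u64) -> bool {
    // mirrors the check at the top of `build_block` below
    allow_empty_until
        .map(|until| parent_view < until)
        .unwrap_or(false)
}

fn main() {
    // transactions were included at view 5, so empty blocks are still built
    // until view 5 + ALLOW_EMPTY_BLOCK_PERIOD = 8 to let that block finalize
    let until = Some(5 + ALLOW_EMPTY_BLOCK_PERIOD);
    assert!(should_prioritize_finalization(until, 6)); // inside the window
    assert!(!should_prioritize_finalization(until, 8)); // window elapsed
}
// ---------------------------------------------------------------------------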
+//
+// let should_prioritize_finalization = self
+// .allow_empty_block_until
+// .map(|until| state_id.parent_view < until)
+// .unwrap_or(false);
+//
+// if self.tx_queue.is_empty() && !should_prioritize_finalization {
+// // Don't build an empty block
+// return None;
+// }
+//
+// let max_block_size = self
+// .global_state
+// .read_arc()
+// .await
+// .block_size_limits
+// .max_block_size;
+// let transactions_to_include = self.tx_queue.iter().scan(0, |total_size, tx| {
+// let prev_size = *total_size;
+// *total_size += tx.min_block_size;
+// // We will include one transaction over our target block length
+// // if it's the first transaction in the queue; otherwise we'd have a possible failure
+// // state where a single transaction larger than the target block size is stuck in
+// // the queue and we just build empty blocks forever
+// if *total_size >= max_block_size && prev_size != 0 {
+// None
+// } else {
+// Some(tx.transaction.clone())
+// }
+// });
+//
+// let Ok((payload, metadata)) =
+// <Types::BlockPayload as BlockPayload<Types>>::from_transactions(
+// transactions_to_include,
+// &self.validated_state,
+// &self.instance_state,
+// )
+// .await
+// else {
+// tracing::warn!("build block, returning None");
+// return None;
+// };
+//
+// let builder_hash = payload.builder_commitment(&metadata);
+// // count the number of txns
+// let actual_txn_count = payload.num_transactions(&metadata);
+//
+// // Payload is empty despite us checking that tx_queue isn't empty earlier.
+// //
+// // This means that the block was truncated due to *sequencer* block length
+// // limits, which are different from our `max_block_size`. There's no good way
+// // for us to check for this in advance, so we detect transactions too big for
+// // the sequencer indirectly, by observing that we passed some transactions
+// // to `<Types::BlockPayload as BlockPayload<Types>>::from_transactions`, but
+// // it returned an empty block.
+// // Thus we deduce that the first transaction in our queue is too big to *ever*
+// // be included, because it alone goes over the sequencer's block size limit.
+// // We need to drop it and mark it as "included" so that if we receive
+// // it again we don't even bother with it.
+// if actual_txn_count == 0 && !should_prioritize_finalization {
+// if let Some(txn) = self.tx_queue.pop_front() {
+// self.txns_in_queue.remove(&txn.commit);
+// self.included_txns.insert(txn.commit);
+// };
+// return None;
+// }
+//
+// // insert the recently built block into the builder commitments
+// self.builder_commitments
+// .insert((state_id, builder_hash.clone()));
+//
+// let encoded_txns: Vec<u8> = payload.encode().to_vec();
+// let block_size: u64 = encoded_txns.len() as u64;
+// let offered_fee: u64 = self.base_fee * block_size;
+//
+// // Get the number of nodes stored while processing the `claim_block_with_num_nodes` request
+// // or upon initialization.
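// ---------------------------------------------------------------------------
// Illustrative sketch (not from the diff): the greedy packing rule from the
// `scan` above, over plain sizes instead of transactions. The running total
// stops at `max_block_size`, but the first transaction is always admitted so
// that an oversized head-of-queue transaction cannot stall the builder into
// producing empty blocks forever.
fn pack(sizes: &[u64], max_block_size: u64) -> Vec<u64> {
    sizes
        .iter()
        .scan(0u64, |total, &size| {
            let prev = *total;
            *total += size;
            if *total >= max_block_size && prev != 0 {
                None
            } else {
                Some(size)
            }
        })
        .collect()
}

fn main() {
    assert_eq!(pack(&[4, 4, 4], 10), vec![4, 4]); // third tx would cross the limit
    assert_eq!(pack(&[100, 1], 10), vec![100]); // oversized head is still included
}
// ---------------------------------------------------------------------------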
+// let num_nodes = self.global_state.read_arc().await.num_nodes; +// +// let (trigger_send, trigger_recv) = oneshot(); +// +// // spawn a task to calculate the VID commitment, and pass the handle to the global state +// // later global state can await on it before replying to the proposer +// let (unbounded_sender, unbounded_receiver) = unbounded(); +// #[allow(unused_must_use)] +// async_spawn(async move { +// let Ok(TriggerStatus::Start) = trigger_recv.recv().await else { +// return; +// }; +// +// let join_handle = +// spawn_blocking(move || precompute_vid_commitment(&encoded_txns, num_nodes)); +// #[cfg(async_executor_impl = "tokio")] +// let (vidc, pre_compute_data) = join_handle.await.unwrap(); +// #[cfg(async_executor_impl = "async-std")] +// let (vidc, pre_compute_data) = join_handle.await; +// unbounded_sender.send((vidc, pre_compute_data)).await; +// }); +// +// tracing::info!( +// "Builder view num {:?}, building block with {:?} txns, with builder hash {:?}", +// self.parent_block_references.view_number, +// actual_txn_count, +// builder_hash +// ); +// +// Some(BuildBlockInfo { +// id: BlockId { +// view: self.parent_block_references.view_number, +// hash: builder_hash, +// }, +// block_size, +// offered_fee, +// block_payload: payload, +// metadata, +// vid_trigger: trigger_send, +// vid_receiver: unbounded_receiver, +// truncated: actual_txn_count < self.tx_queue.len(), +// }) +// } +// +// async fn process_block_request(&mut self, req: RequestMessage) { +// // If a spawned clone is active then it will handle the request, otherwise the highest view num builder will handle +// if req.state_id.parent_commitment != self.parent_block_references.vid_commitment +// || req.state_id.parent_view != self.parent_block_references.view_number +// { +// tracing::debug!( +// "Builder {:?} Requested Builder commitment does not match the built_from_view, so ignoring it", +// self.parent_block_references.view_number +// ); +// return; +// } +// +// let highest_view_num_builder_id = self +// .global_state +// .read_arc() +// .await +// .highest_view_num_builder_id +// .clone(); +// +// if highest_view_num_builder_id.parent_view != self.parent_block_references.view_number { +// tracing::debug!( +// "Builder {:?} Requested Builder commitment does not match the highest_view_num_builder_id, so ignoring it", +// self.parent_block_references.view_number +// ); +// return; +// } +// +// tracing::info!( +// "Request for parent {} handled by builder with view {:?}", +// req.state_id, +// self.parent_block_references.view_number, +// ); +// let response = self.build_block(req.state_id.clone()).await; +// +// let Some(response) = response else { +// tracing::debug!("No response to send"); +// return; +// }; +// +// // form the response message +// let response_msg = ResponseMessage { +// builder_hash: response.id.hash.clone(), +// block_size: response.block_size, +// offered_fee: response.offered_fee, +// }; +// +// let builder_hash = response.id.hash.clone(); +// self.global_state.write_arc().await.update_global_state( +// req.state_id.clone(), +// response, +// response_msg.clone(), +// ); +// +// // ... 
and finally, send the response
+// if let Err(e) = req.response_channel.send(response_msg).await {
+// tracing::warn!(
+// "Builder {:?} failed to send response to {:?} with builder hash {:?}, Err: {:?}",
+// self.parent_block_references.view_number,
+// req,
+// builder_hash,
+// e
+// );
+// return;
+// }
+//
+// tracing::info!(
+// "Builder {:?} sent response to the request {:?} with builder hash {:?}",
+// self.parent_block_references.view_number,
+// req,
+// builder_hash
+// );
+// }
+//
+// #[tracing::instrument(skip_all, name = "event loop",
+// fields(builder_parent_block_references = %self.parent_block_references))]
+// pub fn event_loop(mut self) {
+// let _builder_handle = async_spawn(async move {
+// loop {
+// tracing::debug!(
+// "Builder {:?} event loop",
+// self.parent_block_references.view_number
+// );
+// futures::select! {
+// req = self.req_receiver.next() => {
+// tracing::debug!("Received request msg in builder {:?}: {:?}", self.parent_block_references.view_number, req);
+// match req {
+// Some(req) => {
+// if let MessageType::RequestMessage(req) = req {
+// tracing::debug!(
+// "Received request msg in builder {:?}: {:?}",
+// self.parent_block_references.view_number,
+// req
+// );
+// self.process_block_request(req).await;
+// } else {
+// tracing::warn!("Unexpected message on requests channel: {:?}", req);
+// }
+// }
+// None => {
+// tracing::warn!("No more request messages to consume");
+// }
+// }
+// },
+// da = self.da_proposal_receiver.next() => {
+// match da {
+// Some(da) => {
+// if let MessageType::DaProposalMessage(rda_msg) = da {
+// tracing::debug!("Received da proposal msg in builder {:?} for view {:?}", self.parent_block_references, rda_msg.proposal.data.view_number);
+// self.process_da_proposal(rda_msg).await;
+// } else {
+// tracing::warn!("Unexpected message on da proposals channel: {:?}", da);
+// }
+// }
+// None => {
+// tracing::warn!("No more da proposal messages to consume");
+// }
+// }
+// },
+// quorum = self.quorum_proposal_receiver.next() => {
+// match quorum {
+// Some(quorum) => {
+// if let MessageType::QuorumProposalMessage(rquorum_msg) = quorum {
+// tracing::debug!("Received quorum proposal msg in builder {:?} for view {:?}", self.parent_block_references, rquorum_msg.proposal.data.view_number);
+// self.process_quorum_proposal(rquorum_msg).await;
+// } else {
+// tracing::warn!("Unexpected message on quorum proposals channel: {:?}", quorum);
+// }
+// }
+// None => {
+// tracing::warn!("No more quorum proposal messages to consume");
+// }
+// }
+// },
+// decide = self.decide_receiver.next() => {
+// match decide {
+// Some(decide) => {
+// if let MessageType::DecideMessage(rdecide_msg) = decide {
+// let latest_decide_view_num = rdecide_msg.latest_decide_view_number;
+// tracing::debug!("Received decide msg view {:?} in builder {:?}",
+// &latest_decide_view_num,
+// self.parent_block_references);
+// let decide_status = self.process_decide_event(rdecide_msg).await;
+// match decide_status {
+// Some(Status::ShouldExit) => {
+// tracing::info!("Exiting builder {:?} with decide view {:?}",
+// self.parent_block_references,
+// &latest_decide_view_num);
+// return;
+// }
+// Some(Status::ShouldContinue) => {
+// tracing::debug!("Continuing builder {:?}",
+// self.parent_block_references);
+// continue;
+// }
+// None => {
+// tracing::warn!("decide_status was None; continuing builder {:?}",
+// self.parent_block_references);
+// continue;
+// }
+// }
+// } else {
+// tracing::warn!("Unexpected message on decide channel:
{:?}", decide); +// } +// } +// None => { +// tracing::warn!("No more decide messages to consume"); +// } +// } +// }, +// }; +// } +// }); +// } +// } /// Unifies the possible messages that can be received by the builder #[derive(Debug, Clone)] pub enum MessageType { @@ -1013,447 +1004,124 @@ pub enum MessageType { QuorumProposalMessage(QuorumProposalMessage), RequestMessage(RequestMessage), } - -#[allow(clippy::too_many_arguments)] -impl BuilderState { - pub fn new( - parent_block_references: ParentBlockReferences, - decide_receiver: BroadcastReceiver>, - da_proposal_receiver: BroadcastReceiver>, - quorum_proposal_receiver: BroadcastReceiver>, - req_receiver: BroadcastReceiver>, - tx_receiver: BroadcastReceiver>>, - tx_queue: VecDeque>>, - global_state: Arc>>, - maximize_txn_capture_timeout: Duration, - base_fee: u64, - instance_state: Arc, - txn_garbage_collect_duration: Duration, - validated_state: Arc, - ) -> Self { - let txns_in_queue: HashSet<_> = tx_queue.iter().map(|tx| tx.commit).collect(); - BuilderState { - included_txns: HashSet::new(), - included_txns_old: HashSet::new(), - included_txns_expiring: HashSet::new(), - txns_in_queue, - parent_block_references, - decide_receiver, - da_proposal_receiver, - quorum_proposal_receiver, - req_receiver, - da_proposal_payload_commit_to_da_proposal: HashMap::new(), - quorum_proposal_payload_commit_to_quorum_proposal: HashMap::new(), - tx_receiver, - tx_queue, - global_state, - builder_commitments: HashSet::new(), - maximize_txn_capture_timeout, - base_fee, - instance_state, - txn_garbage_collect_duration, - next_txn_garbage_collect_time: Instant::now() + txn_garbage_collect_duration, - validated_state, - allow_empty_block_until: None, - } - } - pub fn clone_with_receiver(&self, req_receiver: BroadcastReceiver>) -> Self { - // Handle the garbage collection of txns - let ( - included_txns, - included_txns_old, - included_txns_expiring, - next_txn_garbage_collect_time, - ) = if Instant::now() >= self.next_txn_garbage_collect_time { - ( - HashSet::new(), - self.included_txns.clone(), - self.included_txns_old.clone(), - Instant::now() + self.txn_garbage_collect_duration, - ) - } else { - ( - self.included_txns.clone(), - self.included_txns_old.clone(), - self.included_txns_expiring.clone(), - self.next_txn_garbage_collect_time, - ) - }; - - BuilderState { - included_txns, - included_txns_old, - included_txns_expiring, - txns_in_queue: self.txns_in_queue.clone(), - parent_block_references: self.parent_block_references.clone(), - decide_receiver: self.decide_receiver.clone(), - da_proposal_receiver: self.da_proposal_receiver.clone(), - quorum_proposal_receiver: self.quorum_proposal_receiver.clone(), - req_receiver, - da_proposal_payload_commit_to_da_proposal: HashMap::new(), - quorum_proposal_payload_commit_to_quorum_proposal: HashMap::new(), - tx_receiver: self.tx_receiver.clone(), - tx_queue: self.tx_queue.clone(), - global_state: self.global_state.clone(), - builder_commitments: self.builder_commitments.clone(), - maximize_txn_capture_timeout: self.maximize_txn_capture_timeout, - base_fee: self.base_fee, - instance_state: self.instance_state.clone(), - txn_garbage_collect_duration: self.txn_garbage_collect_duration, - next_txn_garbage_collect_time, - validated_state: self.validated_state.clone(), - allow_empty_block_until: self.allow_empty_block_until, - } - } - - // collect outstanding transactions - async fn collect_txns(&mut self, timeout_after: Instant) { - while Instant::now() <= timeout_after { - match self.tx_receiver.try_recv() { - 
Ok(tx) => { - if self.included_txns.contains(&tx.commit) - || self.included_txns_old.contains(&tx.commit) - || self.included_txns_expiring.contains(&tx.commit) - || self.txns_in_queue.contains(&tx.commit) - { - continue; - } - self.txns_in_queue.insert(tx.commit); - self.tx_queue.push_back(tx); - } - Err(async_broadcast::TryRecvError::Empty) - | Err(async_broadcast::TryRecvError::Closed) => { - break; - } - Err(async_broadcast::TryRecvError::Overflowed(lost)) => { - tracing::warn!("Missed {lost} transactions due to backlog"); - continue; - } - } - } - } -} - -#[cfg(test)] -mod test { - use std::collections::HashMap; - - use async_broadcast::broadcast; - use committable::RawCommitmentBuilder; - use hotshot_example_types::block_types::TestTransaction; - use hotshot_example_types::node_types::TestTypes; - use hotshot_types::data::ViewNumber; - use hotshot_types::data::{Leaf, QuorumProposal}; - use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; - use hotshot_types::utils::BuilderCommitment; - use marketplace_builder_shared::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; - - use super::DAProposalInfo; - use super::MessageType; - use super::ParentBlockReferences; - use crate::testing::{calc_builder_commitment, calc_proposal_msg, create_builder_state}; - - /// This test the function `process_da_propsal`. - /// It checkes da_proposal_payload_commit_to_da_proposal change appropriately - /// when receiving a da proposal message. - /// This test also checks whether corresponding BuilderStateId is in global_state. - #[async_std::test] - async fn test_process_da_proposal() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - tracing::info!("Testing the function `process_da_proposal` in `builder_state.rs`"); - - // Number of views to simulate - const NUM_ROUNDS: usize = 5; - // Capacity of broadcast channels - const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; - // Number of nodes on DA committee - const NUM_STORAGE_NODES: usize = TEST_NUM_NODES_IN_VID_COMPUTATION; - - // create builder_state without entering event loop - let (_senders, global_state, mut builder_state) = - create_builder_state(CHANNEL_CAPACITY, NUM_STORAGE_NODES).await; - - // randomly generate a transaction - let transactions = vec![TestTransaction::new(vec![1, 2, 3]); 3]; - let (_quorum_proposal, _quorum_proposal_msg, da_proposal_msg, builder_state_id) = - calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions.clone()).await; - - // sub-test one - // call process_da_proposal without matching quorum proposal message - // da_proposal_payload_commit_to_da_proposal should insert the message - let mut correct_da_proposal_payload_commit_to_da_proposal: HashMap< - (BuilderCommitment, ::View), - DAProposalInfo, - > = HashMap::new(); - let (payload_builder_commitment, da_proposal_info) = - calc_builder_commitment(da_proposal_msg.clone()).await; - - builder_state - .process_da_proposal(da_proposal_msg.clone()) - .await; - correct_da_proposal_payload_commit_to_da_proposal.insert( - ( - payload_builder_commitment, - da_proposal_msg.proposal.data.view_number, - ), - da_proposal_info, - ); - - assert_eq!( - builder_state.da_proposal_payload_commit_to_da_proposal, - correct_da_proposal_payload_commit_to_da_proposal - ); - // check global_state didn't change - if global_state - .read_arc() - .await - .spawned_builder_states - .contains_key(&builder_state_id) - { - panic!("global_state shouldn't have cooresponding builder_state_id without matching quorum 
proposal."); - } - - // sub-test two - // call process_da_proposal with the same msg again - // we should skip the process and everything should be the same - let transactions_1 = transactions.clone(); - let (_quorum_proposal_1, _quorum_proposal_msg_1, da_proposal_msg_1, builder_state_id_1) = - calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions_1).await; - builder_state - .process_da_proposal(da_proposal_msg_1.clone()) - .await; - assert_eq!( - builder_state.da_proposal_payload_commit_to_da_proposal, - correct_da_proposal_payload_commit_to_da_proposal - ); - // check global_state didn't change - if global_state - .read_arc() - .await - .spawned_builder_states - .contains_key(&builder_state_id_1) - { - panic!("global_state shouldn't have cooresponding builder_state_id without matching quorum proposal."); - } - - // sub-test three - // add the matching quorum proposal message with different tx - // and call process_da_proposal with this matching da proposal message and quorum proposal message - // we should spawn_clone here - // and check whether global_state has correct BuilderStateId - let transactions_2 = vec![TestTransaction::new(vec![1, 2, 3, 4]); 2]; - let (_quorum_proposal_2, quorum_proposal_msg_2, da_proposal_msg_2, builder_state_id_2) = - calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions_2).await; - - // process quorum proposal first, so that later when process_da_proposal we can directly call `build_block` and skip storage - builder_state - .process_quorum_proposal(quorum_proposal_msg_2.clone()) - .await; - - // process da proposal message and do the check - builder_state - .process_da_proposal(da_proposal_msg_2.clone()) - .await; - assert_eq!( - builder_state.da_proposal_payload_commit_to_da_proposal, - correct_da_proposal_payload_commit_to_da_proposal, - ); - // check global_state has this new builder_state_id - if global_state - .read_arc() - .await - .spawned_builder_states - .contains_key(&builder_state_id_2) - { - tracing::debug!("global_state updated successfully"); - } else { - panic!("global_state should have cooresponding builder_state_id as now we have matching quorum proposal."); - } - } - - /// This test the function `process_quorum_propsal`. - /// It checkes quorum_proposal_payload_commit_to_quorum_proposal change appropriately - /// when receiving a quorum proposal message. - /// This test also checks whether corresponding BuilderStateId is in global_state. 
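// ---------------------------------------------------------------------------
// Illustrative sketch (not from the diff): the three-generation transaction
// garbage collection used by `clone_with_receiver` above, as a std-only
// example with u64 commitments standing in for the real ones. Sets rotate at
// most once per period, so a seen commitment is remembered for roughly two
// to three `txn_garbage_collect_duration` periods before it can be accepted
// again.
use std::collections::HashSet;
use std::time::{Duration, Instant};

struct TxnGc {
    current: HashSet<u64>,  // included_txns
    old: HashSet<u64>,      // included_txns_old
    expiring: HashSet<u64>, // included_txns_expiring
    period: Duration,
    next_rotation: Instant,
}

impl TxnGc {
    fn contains(&self, commit: &u64) -> bool {
        self.current.contains(commit)
            || self.old.contains(commit)
            || self.expiring.contains(commit)
    }

    /// Called when forking a new builder state: the oldest set is dropped,
    /// the remaining sets age by one generation, and a fresh set starts.
    fn maybe_rotate(&mut self) {
        if Instant::now() >= self.next_rotation {
            self.expiring = std::mem::take(&mut self.old);
            self.old = std::mem::take(&mut self.current);
            self.next_rotation = Instant::now() + self.period;
        }
    }
}

fn main() {
    let mut gc = TxnGc {
        current: HashSet::new(),
        old: HashSet::new(),
        expiring: HashSet::new(),
        period: Duration::from_secs(60),
        next_rotation: Instant::now(), // due immediately, for demonstration
    };
    gc.current.insert(1);
    gc.maybe_rotate(); // 1 ages from `current` into `old`
    assert!(gc.contains(&1));
}
// ---------------------------------------------------------------------------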
- #[async_std::test] - async fn test_process_quorum_proposal() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - tracing::info!("Testing the function `process_quorum_proposal` in `builder_state.rs`"); - - // Number of views to simulate - const NUM_ROUNDS: usize = 5; - // Capacity of broadcast channels - const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; - // Number of nodes on DA committee - const NUM_STORAGE_NODES: usize = TEST_NUM_NODES_IN_VID_COMPUTATION; - - // create builder_state without entering event loop - let (_senders, global_state, mut builder_state) = - create_builder_state(CHANNEL_CAPACITY, NUM_STORAGE_NODES).await; - - // randomly generate a transaction - let transactions = vec![TestTransaction::new(vec![1, 2, 3]); 3]; - let (_quorum_proposal, quorum_proposal_msg, _da_proposal_msg, builder_state_id) = - calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions.clone()).await; - - // sub-test one - // call process_quorum_proposal without matching da proposal message - // quorum_proposal_payload_commit_to_quorum_proposal should insert the message - let mut correct_quorum_proposal_payload_commit_to_quorum_proposal = HashMap::new(); - builder_state - .process_quorum_proposal(quorum_proposal_msg.clone()) - .await; - correct_quorum_proposal_payload_commit_to_quorum_proposal.insert( - ( - quorum_proposal_msg - .proposal - .data - .block_header - .builder_commitment - .clone(), - quorum_proposal_msg.proposal.data.view_number, - ), - quorum_proposal_msg.proposal, - ); - assert_eq!( - builder_state - .quorum_proposal_payload_commit_to_quorum_proposal - .clone(), - correct_quorum_proposal_payload_commit_to_quorum_proposal.clone() - ); - // check global_state didn't change - if global_state - .read_arc() - .await - .spawned_builder_states - .contains_key(&builder_state_id) - { - panic!("global_state shouldn't have cooresponding builder_state_id without matching quorum proposal."); - } - - // sub-test two - // add the matching da proposal message with different tx - // and call process_da_proposal with this matching quorum proposal message and quorum da message - // we should spawn_clone here - // and check whether global_state has correct BuilderStateId - let transactions_2 = vec![TestTransaction::new(vec![2, 3, 4]); 2]; - let (_quorum_proposal_2, quorum_proposal_msg_2, da_proposal_msg_2, builder_state_id_2) = - calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions_2).await; - - // process da proposal message first, so that later when process_da_proposal we can directly call `build_block` and skip storage - builder_state - .process_da_proposal(da_proposal_msg_2.clone()) - .await; - - // process quorum proposal, and do the check - builder_state - .process_quorum_proposal(quorum_proposal_msg_2.clone()) - .await; - - assert_eq!( - builder_state - .quorum_proposal_payload_commit_to_quorum_proposal - .clone(), - correct_quorum_proposal_payload_commit_to_quorum_proposal.clone() - ); - - // check global_state has this new builder_state_id - if global_state - .read_arc() - .await - .spawned_builder_states - .contains_key(&builder_state_id_2) - { - tracing::debug!("global_state updated successfully"); - } else { - panic!("global_state should have cooresponding builder_state_id as now we have matching da proposal."); - } - } - - /// This test the function `process_decide_event`. - /// It checkes whether we exit out correct builder states when there's a decide event coming in. 
- /// This test also checks whether corresponding BuilderStateId is removed in global_state. - #[async_std::test] - async fn test_process_decide_event() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); - tracing::info!("Testing the builder core with multiple messages from the channels"); - - // Number of views to simulate - const NUM_ROUNDS: usize = 5; - // Number of transactions to submit per round - const NUM_TXNS_PER_ROUND: usize = 4; - // Capacity of broadcast channels - const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; - // Number of nodes on DA committee - const NUM_STORAGE_NODES: usize = TEST_NUM_NODES_IN_VID_COMPUTATION; - - // create builder_state without entering event loop - let (_senders, global_state, mut builder_state) = - create_builder_state(CHANNEL_CAPACITY, NUM_STORAGE_NODES).await; - - // Transactions to send - let all_transactions = (0..NUM_ROUNDS) - .map(|round| { - (0..NUM_TXNS_PER_ROUND) - .map(|tx_num| TestTransaction::new(vec![round as u8, tx_num as u8])) - .collect::>() - }) - .collect::>(); - let mut prev_quorum_proposal: Option> = None; - // register some builder states for later decide event - #[allow(clippy::needless_range_loop)] - for round in 0..NUM_ROUNDS { - let transactions = all_transactions[round].clone(); - let (quorum_proposal, _quorum_proposal_msg, _da_proposal_msg, builder_state_id) = - calc_proposal_msg(NUM_STORAGE_NODES, round, prev_quorum_proposal, transactions) - .await; - prev_quorum_proposal = Some(quorum_proposal.clone()); - let (req_sender, _req_receiver) = broadcast(CHANNEL_CAPACITY); - let leaf: Leaf = Leaf::from_quorum_proposal(&quorum_proposal); - let leaf_commit = RawCommitmentBuilder::new("leaf commitment") - .u64_field("view number", leaf.view_number().u64()) - .u64_field("block number", leaf.height()) - .field("parent Leaf commitment", leaf.parent_commitment()) - .var_size_field( - "block payload commitment", - leaf.payload_commitment().as_ref(), - ) - .finalize(); - global_state.write_arc().await.register_builder_state( - builder_state_id, - ParentBlockReferences { - view_number: quorum_proposal.view_number, - vid_commitment: quorum_proposal.block_header.payload_commitment, - leaf_commit, - builder_commitment: quorum_proposal.block_header.builder_commitment, - }, - req_sender, - ); - } - - // send out a decide event in a middle round - let latest_decide_view_number = ViewNumber::new(3); - - let decide_message = MessageType::DecideMessage(crate::builder_state::DecideMessage { - latest_decide_view_number, - }); - if let MessageType::DecideMessage(practice_decide_msg) = decide_message.clone() { - builder_state - .process_decide_event(practice_decide_msg.clone()) - .await; - } else { - panic!("Not a decide_message in correct format"); - } - // check whether spawned_builder_states have correct builder_state_id and already exit-ed builder_states older than decides - let current_spawned_builder_states = - global_state.read_arc().await.spawned_builder_states.clone(); - current_spawned_builder_states - .iter() - .for_each(|(builder_state_id, _)| { - assert!(builder_state_id.parent_view >= latest_decide_view_number) - }); - } -} +// +// #[allow(clippy::too_many_arguments)] +// impl BuilderState { +// pub fn new( +// parent_block_references: ParentBlockReferences, +// decide_receiver: BroadcastReceiver>, +// da_proposal_receiver: BroadcastReceiver>, +// quorum_proposal_receiver: BroadcastReceiver>, +// req_receiver: BroadcastReceiver>, +// tx_receiver: BroadcastReceiver>>, +// 
tx_queue: VecDeque>>, +// global_state: Arc>>, +// maximize_txn_capture_timeout: Duration, +// base_fee: u64, +// instance_state: Arc, +// txn_garbage_collect_duration: Duration, +// validated_state: Arc, +// ) -> Self { +// let txns_in_queue: HashSet<_> = tx_queue.iter().map(|tx| tx.commit).collect(); +// BuilderState { +// included_txns: HashSet::new(), +// included_txns_old: HashSet::new(), +// included_txns_expiring: HashSet::new(), +// txns_in_queue, +// parent_block_references, +// decide_receiver, +// da_proposal_receiver, +// quorum_proposal_receiver, +// req_receiver, +// da_proposal_payload_commit_to_da_proposal: HashMap::new(), +// quorum_proposal_payload_commit_to_quorum_proposal: HashMap::new(), +// tx_receiver, +// tx_queue, +// global_state, +// builder_commitments: HashSet::new(), +// maximize_txn_capture_timeout, +// base_fee, +// instance_state, +// txn_garbage_collect_duration, +// next_txn_garbage_collect_time: Instant::now() + txn_garbage_collect_duration, +// validated_state, +// allow_empty_block_until: None, +// } +// } +// pub fn clone_with_receiver(&self, req_receiver: BroadcastReceiver>) -> Self { +// // Handle the garbage collection of txns +// let ( +// included_txns, +// included_txns_old, +// included_txns_expiring, +// next_txn_garbage_collect_time, +// ) = if Instant::now() >= self.next_txn_garbage_collect_time { +// ( +// HashSet::new(), +// self.included_txns.clone(), +// self.included_txns_old.clone(), +// Instant::now() + self.txn_garbage_collect_duration, +// ) +// } else { +// ( +// self.included_txns.clone(), +// self.included_txns_old.clone(), +// self.included_txns_expiring.clone(), +// self.next_txn_garbage_collect_time, +// ) +// }; +// +// BuilderState { +// included_txns, +// included_txns_old, +// included_txns_expiring, +// txns_in_queue: self.txns_in_queue.clone(), +// parent_block_references: self.parent_block_references.clone(), +// decide_receiver: self.decide_receiver.clone(), +// da_proposal_receiver: self.da_proposal_receiver.clone(), +// quorum_proposal_receiver: self.quorum_proposal_receiver.clone(), +// req_receiver, +// da_proposal_payload_commit_to_da_proposal: HashMap::new(), +// quorum_proposal_payload_commit_to_quorum_proposal: HashMap::new(), +// tx_receiver: self.tx_receiver.clone(), +// tx_queue: self.tx_queue.clone(), +// global_state: self.global_state.clone(), +// builder_commitments: self.builder_commitments.clone(), +// maximize_txn_capture_timeout: self.maximize_txn_capture_timeout, +// base_fee: self.base_fee, +// instance_state: self.instance_state.clone(), +// txn_garbage_collect_duration: self.txn_garbage_collect_duration, +// next_txn_garbage_collect_time, +// validated_state: self.validated_state.clone(), +// allow_empty_block_until: self.allow_empty_block_until, +// } +// } +// +// // collect outstanding transactions +// async fn collect_txns(&mut self, timeout_after: Instant) { +// while Instant::now() <= timeout_after { +// match self.tx_receiver.try_recv() { +// Ok(tx) => { +// if self.included_txns.contains(&tx.commit) +// || self.included_txns_old.contains(&tx.commit) +// || self.included_txns_expiring.contains(&tx.commit) +// || self.txns_in_queue.contains(&tx.commit) +// { +// continue; +// } +// self.txns_in_queue.insert(tx.commit); +// self.tx_queue.push_back(tx); +// } +// Err(async_broadcast::TryRecvError::Empty) +// | Err(async_broadcast::TryRecvError::Closed) => { +// break; +// } +// Err(async_broadcast::TryRecvError::Overflowed(lost)) => { +// tracing::warn!("Missed {lost} transactions due to 
backlog"); +// continue; +// } +// } +// } +// } +// } +// diff --git a/crates/legacy/src/lib.rs b/crates/legacy/src/lib.rs index 4e2e6eb7..c65ff133 100644 --- a/crates/legacy/src/lib.rs +++ b/crates/legacy/src/lib.rs @@ -15,74 +15,49 @@ pub mod builder_state; // Core interaction with the HotShot network +pub mod block_size_limits; pub mod service; // tracking the testing #[cfg(test)] pub mod testing; -use async_compatibility_layer::channel::UnboundedReceiver; -use hotshot_builder_api::v0_1::builder::BuildError; -use hotshot_types::traits::node_implementation::NodeType; +use std::{future::Future, pin::Pin}; + +use futures::future::BoxFuture; /// `WaitAndKeep` is a helper enum that allows for the lazy polling of a single /// value from an unbound receiver. -#[derive(Debug)] -pub enum WaitAndKeep { +#[derive(derive_more::Debug)] +pub enum WaitAndKeep { Keep(T), - Wait(UnboundedReceiver), -} - -#[derive(Debug)] -pub(crate) enum WaitAndKeepGetError { - FailedToResolvedVidCommitmentFromChannel, + #[debug("Wait")] + Wait(Pin + Send + Sync + 'static>>), } -impl From for BuildError { - fn from(e: WaitAndKeepGetError) -> Self { - match e { - WaitAndKeepGetError::FailedToResolvedVidCommitmentFromChannel => { - BuildError::Error("failed to resolve VidCommitment from channel".to_string()) - } - } +impl WaitAndKeep { + pub fn new(fut: Pin + Send + Sync + 'static>>) -> Self { + Self::Wait(fut) } -} -impl WaitAndKeep { /// get will return a clone of the value that is already stored within the /// value of `WaitAndKeep::Keep` if the value is already resolved. Otherwise - /// it will poll the next value from the channel and replace the locally + /// it will await the future and replace the locally /// stored `WaitAndKeep::Wait` with the resolved value as a `WaitAndKeep::Keep`. - /// - /// Note: This pattern seems very similar to a Future, and ultimately - /// returns a future. It's not clear why this needs to be implemented - /// in such a way and not just implemented as a boxed future. - pub(crate) async fn get(&mut self) -> Result { + pub async fn resolve(&mut self) -> &T { match self { - WaitAndKeep::Keep(t) => Ok(t.clone()), + WaitAndKeep::Keep(t) => t, WaitAndKeep::Wait(fut) => { - let got = fut - .recv() - .await - .map_err(|_| WaitAndKeepGetError::FailedToResolvedVidCommitmentFromChannel); - if let Ok(got) = &got { - let mut replace = WaitAndKeep::Keep(got.clone()); - core::mem::swap(self, &mut replace); - } - got + *self = WaitAndKeep::Keep(fut.await); + self.get_unchecked() } } } -} - -// TODO: Update commitment calculation with the new `commit`. 
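// ---------------------------------------------------------------------------
// Illustrative sketch (not from the diff): hypothetical usage of the reworked
// `WaitAndKeep` above. The first `resolve` awaits the boxed future and caches
// the value as `Keep`; later calls return the cached reference without
// re-polling. The async block here holds no state across awaits, so it is
// Send + Sync and `Box::pin(..)` coerces to the pinned boxed future the
// constructor expects.
async fn demo() {
    let mut value: WaitAndKeep<u64> = WaitAndKeep::new(Box::pin(async { 42u64 }));
    assert_eq!(*value.resolve().await, 42); // awaits the future, stores Keep(42)
    assert_eq!(*value.resolve().await, 42); // served from the cached value
}
// ---------------------------------------------------------------------------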
-// -trait LegacyCommit { - fn legacy_commit(&self) -> committable::Commitment>; -} -impl LegacyCommit for hotshot_types::data::Leaf { - fn legacy_commit(&self) -> committable::Commitment> { - as committable::Committable>::commit(self) + fn get_unchecked(&self) -> &T { + match self { + WaitAndKeep::Keep(t) => t, + _ => unreachable!(), + } } } diff --git a/crates/legacy/src/service.rs b/crates/legacy/src/service.rs index 223e3cbb..5743208a 100644 --- a/crates/legacy/src/service.rs +++ b/crates/legacy/src/service.rs @@ -1,9 +1,14 @@ +use futures::future::Either; +use futures::FutureExt; use hotshot::types::Event; +use hotshot_builder_api::v0_1::builder::{define_api, submit_api, Error as BuilderApiError}; use hotshot_builder_api::v0_1::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::BuildError, data_source::{AcceptsTxnSubmits, BuilderDataSource}, }; +use hotshot_types::traits::block_contents::precompute_vid_commitment; +use hotshot_types::traits::EncodeBytes; use hotshot_types::{ data::{DaProposal, Leaf, QuorumProposal}, event::EventType, @@ -17,19 +22,26 @@ use hotshot_types::{ vid::{VidCommitment, VidPrecomputeData}, }; use lru::LruCache; -use vbs::version::StaticVersionType; +use marketplace_builder_shared::coordinator::BuilderStateLookup; +use tide_disco::app::AppError; +use vbs::version::{StaticVersion, StaticVersionType}; -use marketplace_builder_shared::block::{BlockId, BuilderStateId, ParentBlockReferences}; +use marketplace_builder_shared::{ + block::{ + BlockId, BuilderStateId, ParentBlockReferences, ReceivedTransaction, TransactionSource, + }, + coordinator::BuilderStateCoordinator, + utils::LegacyCommit, +}; -use crate::builder_state::{MessageType, RequestMessage, ResponseMessage}; +use crate::builder_state::{ + BuildBlockInfo, DaProposalMessage, DecideMessage, QuorumProposalMessage, TriggerStatus, +}; +use crate::WaitAndKeep; use crate::{ - builder_state::{ - BuildBlockInfo, DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource, - TriggerStatus, - }, - LegacyCommit as _, + block_size_limits::BlockSizeLimits, + builder_state::{MessageType, RequestMessage, ResponseMessage}, }; -use crate::{WaitAndKeep, WaitAndKeepGetError}; pub use async_broadcast::{broadcast, RecvError, TryRecvError}; use async_broadcast::{Sender as BroadcastSender, TrySendError}; use async_compatibility_layer::{ @@ -38,18 +50,26 @@ use async_compatibility_layer::{ channel::{unbounded, OneShotSender}, }; use async_lock::RwLock; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use async_trait::async_trait; use committable::{Commitment, Committable}; -use futures::stream::StreamExt; use futures::{future::BoxFuture, Stream}; +use futures::{ + stream::{FuturesOrdered, FuturesUnordered, StreamExt}, + TryStreamExt, +}; use sha2::{Digest, Sha256}; -use std::collections::HashMap; use std::num::NonZeroUsize; +use std::sync::atomic::Ordering; use std::sync::Arc; use std::time::Duration; +use std::{collections::HashMap, sync::atomic::AtomicUsize}; use std::{fmt::Display, time::Instant}; use tagged_base64::TaggedBase64; -use tide_disco::method::ReadState; +use tide_disco::{method::ReadState, App}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; // We will not increment max block value if we aren't able to serve a response // with a margin below [`ProxyGlobalState::max_api_waiting_time`] @@ -61,533 +81,137 @@ const VID_RESPONSE_TARGET_MARGIN_DIVISOR: u32 = 10; pub struct BlockInfo { pub block_payload: 
Types::BlockPayload, pub metadata: <::BlockPayload as BlockPayload>::Metadata, - pub vid_trigger: Arc>>>, - pub vid_receiver: Arc>>, + pub vid_data: WaitAndKeep<(VidCommitment, VidPrecomputeData)>, pub offered_fee: u64, // Could we have included more transactions with this block, but chose not to? pub truncated: bool, } -/// [`ReceivedTransaction`] represents receipt information concerning a received -/// [`NodeType::Transaction`]. -#[derive(Debug)] -pub struct ReceivedTransaction { - // the transaction - pub tx: Types::Transaction, - // transaction's hash - pub commit: Commitment, - // transaction's esitmated length - pub len: u64, - // transaction's source - pub source: TransactionSource, - // received time - pub time_in: Instant, -} - -/// Adjustable limits for block size ceiled by -/// maximum block size allowed by the protocol -#[derive(Debug, Clone)] -pub struct BlockSizeLimits { - // maximum block size allowed by the protocol - pub protocol_max_block_size: u64, - // estimated maximum block size we can build in time - pub max_block_size: u64, - pub increment_period: Duration, - pub last_block_size_increment: Instant, -} - -impl BlockSizeLimits { - /// Never go lower than 10 kilobytes - pub const MAX_BLOCK_SIZE_FLOOR: u64 = 10_000; - /// When adjusting max block size, it will be decremented or incremented - /// by current value / `MAX_BLOCK_SIZE_CHANGE_DIVISOR` - pub const MAX_BLOCK_SIZE_CHANGE_DIVISOR: u64 = 10; - - pub fn new(protocol_max_block_size: u64, increment_period: Duration) -> Self { - Self { - protocol_max_block_size, - max_block_size: protocol_max_block_size, - increment_period, - last_block_size_increment: Instant::now(), - } - } - - /// If increment period has elapsed or `force` flag is set, - /// increment [`Self::max_block_size`] by current value * [`Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR`] - /// with [`Self::protocol_max_block_size`] as a ceiling - pub fn try_increment_block_size(&mut self, force: bool) { - if force || self.last_block_size_increment.elapsed() >= self.increment_period { - self.max_block_size = std::cmp::min( - self.max_block_size - + self - .max_block_size - .div_ceil(Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR), - self.protocol_max_block_size, - ); - self.last_block_size_increment = Instant::now(); - } - } - - /// Decrement [`Self::max_block_size`] by current value * [`Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR`] - /// with [`Self::MAX_BLOCK_SIZE_FLOOR`] as a floor - pub fn decrement_block_size(&mut self) { - self.max_block_size = std::cmp::max( - self.max_block_size - - self - .max_block_size - .div_ceil(Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR), - Self::MAX_BLOCK_SIZE_FLOOR, - ); - } -} - -/// [`GlobalState`] represents the internalized state of the Builder service as -/// represented from its public facing API. -#[allow(clippy::type_complexity)] -#[derive(Debug)] pub struct GlobalState { - // data store for the blocks - pub blocks: lru::LruCache, BlockInfo>, - - // registered builder states - pub spawned_builder_states: HashMap< - BuilderStateId, - ( - // This is provided as an Option for convenience with initialization. - // When we build the initial state, we don't necessarily want to - // have to generate a valid ParentBlockReferences object. As doing - // such would require a bit of setup. Additionally it would - // result in the call signature to `GlobalState::new` changing. - // However for every subsequent BuilderState, we expect this value - // to be populated. 
- Option>, - BroadcastSender>, - ), - >, - - // builder state -> last built block , it is used to respond the client - // if the req channel times out during get_available_blocks - pub builder_state_to_last_built_block: HashMap, ResponseMessage>, - - // sending a transaction from the hotshot/private mempool to the builder states - // NOTE: Currently, we don't differentiate between the transactions from the hotshot and the private mempool - pub tx_sender: BroadcastSender>>, - - // last garbage collected view number - pub last_garbage_collected_view_num: Types::View, - - // highest view running builder task - pub highest_view_num_builder_id: BuilderStateId, - - pub block_size_limits: BlockSizeLimits, - - /// Number of nodes. - /// - /// Initial value may be updated by the `claim_block_with_num_nodes` endpoint. - pub num_nodes: usize, -} - -/// `GetChannelForMatchingBuilderError` is an error enum that represents the -/// class of possible errors that can be returned when calling -/// `get_channel_for_matching_builder_or_highest_view_builder` on a -/// `GlobalState`. These errors are used for internal representations for -/// consistency and testing, and do not leak beyond the `GlobalState` API. -/// As such, they intentionally do not implement traits for serialization. -#[derive(Debug)] -pub(crate) enum GetChannelForMatchingBuilderError { - NoBuilderStateFound, -} - -impl From for BuildError { - fn from(_error: GetChannelForMatchingBuilderError) -> Self { - BuildError::Error("No builder state found".to_string()) - } -} - -impl GlobalState { - /// Creates a new [`GlobalState`] with the given parameters. - /// The resulting [`GlobalState`] will have the given - /// `last_garbage_collected_view_num` as passed. Additionally, the - /// `highest_view_num_builder_id` will be set to a [`BuilderStateId`] - /// comprised of the given `bootstrapped_builder_state_id` and - /// `bootstrapped_view_num`. The `spawned_builder_states` will be created - /// with a single entry of the same [`BuilderStateId`] and the given - /// `bootstrap_sender`. - /// `protocol_max_block_size` is maximum block size allowed by the protocol, - /// e.g. `chain_config.max_block_size` for espresso-sequencer. - /// `max_block_size_increment_period` determines the interval between attempts - /// to increase the builder's block size limit if it is less than the protocol maximum. - #[allow(clippy::too_many_arguments)] - pub fn new( - bootstrap_sender: BroadcastSender>, - tx_sender: BroadcastSender>>, - bootstrapped_builder_state_id: VidCommitment, - bootstrapped_view_num: Types::View, - last_garbage_collected_view_num: Types::View, - max_block_size_increment_period: Duration, - protocol_max_block_size: u64, - num_nodes: usize, - ) -> Self { - let mut spawned_builder_states = HashMap::new(); - let bootstrap_id = BuilderStateId { - parent_commitment: bootstrapped_builder_state_id, - parent_view: bootstrapped_view_num, - }; - spawned_builder_states.insert(bootstrap_id.clone(), (None, bootstrap_sender.clone())); - GlobalState { - blocks: LruCache::new(NonZeroUsize::new(256).unwrap()), - spawned_builder_states, - tx_sender, - last_garbage_collected_view_num, - builder_state_to_last_built_block: Default::default(), - highest_view_num_builder_id: bootstrap_id, - block_size_limits: BlockSizeLimits::new( - protocol_max_block_size, - max_block_size_increment_period, - ), - num_nodes, - } - } - - /// Associates the given [`BuilderStateId`] with - /// the given [`BroadcastSender`] in the [`GlobalState`]. 
- /// - /// Additionally, if the view of the [`BuilderStateId`] is greater than the - /// current highest view number, the [`BuilderStateId`] is set as the new - /// highest view number. - /// - /// There is potential here for data loss. Since we just blindly insert - /// the [`BuilderStateId`] and [`BroadcastSender`] into the hashmap, we could - /// potentially be overwriting an existing entry. This would result in - /// the loss of access to a [`BroadcastSender`], and could potentially - /// result in unexpected behavior. - pub fn register_builder_state( - &mut self, - parent_id: BuilderStateId, - built_from_proposed_block: ParentBlockReferences, - request_sender: BroadcastSender>, - ) { - // register the builder state - let previous_value = self.spawned_builder_states.insert( - parent_id.clone(), - (Some(built_from_proposed_block), request_sender), - ); - - if let Some(previous_value) = previous_value { - tracing::warn!( - "builder {parent_id} overwrote previous spawned_builder_state entry: {:?}", - previous_value - ); - } - - // keep track of the max view number - if parent_id.parent_view > self.highest_view_num_builder_id.parent_view { - tracing::info!("registering builder {parent_id} as highest",); - self.highest_view_num_builder_id = parent_id; - } else { - tracing::warn!( - "builder {parent_id} created; highest registered is {}", - self.highest_view_num_builder_id, - ); - } - } - - /// Ensures that the given [`BuildBlockInfo`]'d id - /// is within the [`GlobalState`]'s [`blocks`](GlobalState::blocks) LRU Cache. The cache stores the - /// [`BlockInfo`] associated with the given [`BuildBlockInfo`]'s id. However - /// if it already exists within the LRU cache, then the `BlockInfo` is not - /// updated. - /// - /// Additionally, the [`BuilderStateId`] is associated with the given - /// [`ResponseMessage`] in the [`Self::builder_state_to_last_built_block`] hashmap. - /// - /// No care or consideration is given to anything that may have been - /// stored with the same key in the [`Self::builder_state_to_last_built_block`]. - pub fn update_global_state( - &mut self, - state_id: BuilderStateId, - build_block_info: BuildBlockInfo, - response_msg: ResponseMessage, - ) { - let BuildBlockInfo { - id, - block_payload, - metadata, - vid_trigger, - vid_receiver, - offered_fee, - truncated, - .. - } = build_block_info; - - let previous_cache_entry = self.blocks.put( - id.clone(), - BlockInfo { - block_payload, - metadata, - vid_trigger: Arc::new(RwLock::new(Some(vid_trigger))), - vid_receiver: Arc::new(RwLock::new(WaitAndKeep::Wait(vid_receiver))), - offered_fee, - truncated, - }, - ); - - // update the builder state to last built block - let previous_builder_state_entry = self - .builder_state_to_last_built_block - .insert(state_id, response_msg); - - if let Some(previous_builder_state_entry) = previous_builder_state_entry { - tracing::warn!( - "block {id} overwrote previous block: {:?}. previous cache entry: {:?}", - previous_builder_state_entry, - previous_cache_entry - ); - } - } - - /// Cleans up the [`GlobalState`] by removing all - /// `spawned_builder_states` that have been stored, up to a derived - /// reference view. This cutoff point can be up to the given - /// `on_decide_view` so long as the provided value is less than or equal - /// to the `highest_view_num_builder_id`'s view stored on the state. - /// Beyond that, the state prefers to drop all `spawned_builder_states` - /// preceding the derived cutoff view. 
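// ---------------------------------------------------------------------------
// Illustrative sketch (not from the diff): the bookkeeping in
// `register_builder_state` above, reduced to a std-only example. A blind
// insert can shadow an existing sender (hence the warning in the real code),
// and the highest registered view is tracked so that requests with no exact
// match can fall back to the newest builder state.
use std::collections::HashMap;

#[derive(Default)]
struct Registry {
    spawned: HashMap<u64 /* view */, &'static str /* stand-in for a sender */>,
    highest_view: u64,
}

impl Registry {
    fn register(&mut self, view: u64, sender: &'static str) {
        if let Some(previous) = self.spawned.insert(view, sender) {
            eprintln!("view {view} overwrote a previous entry: {previous}");
        }
        if view > self.highest_view {
            self.highest_view = view;
        }
    }
}

fn main() {
    let mut reg = Registry::default();
    reg.register(3, "sender-a");
    reg.register(5, "sender-b");
    assert_eq!(reg.highest_view, 5);
    reg.register(3, "sender-c"); // warns: shadows sender-a
}
// ---------------------------------------------------------------------------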
- /// - /// In addition the `last_garbage_collected_view_num` is updated to the - /// target cutoff view number for tracking purposes. The value returned - /// is the cutoff view number such that the returned value indicates the - /// point before which everything was cleaned up. - pub fn remove_handles(&mut self, on_decide_view: Types::View) -> Types::View { - // remove everything from the spawned builder states when view_num <= on_decide_view; - // if we don't have a highest view > decide, use highest view as cutoff. - let cutoff = std::cmp::min(self.highest_view_num_builder_id.parent_view, on_decide_view); - self.spawned_builder_states - .retain(|id, _| id.parent_view >= cutoff); - - let cutoff_u64 = cutoff.u64(); - let gc_view = if cutoff_u64 > 0 { cutoff_u64 - 1 } else { 0 }; - - self.last_garbage_collected_view_num = Types::View::new(gc_view); - - cutoff - } - - // private mempool submit txn - // Currently, we don't differentiate between the transactions from the hotshot and the private mempool - pub async fn submit_client_txns( - &self, - txns: Vec<::Transaction>, - ) -> Vec::Transaction>, BuildError>> { - handle_received_txns( - &self.tx_sender, - txns, - TransactionSource::External, - self.block_size_limits.max_block_size, - ) - .await - } - - /// Helper function that attempts to retrieve the broadcast sender for the given - /// [`BuilderStateId`]. If the sender does not exist, it will return the - /// broadcast sender for the for the hightest view number [`BuilderStateId`] - /// instead. - pub(crate) fn get_channel_for_matching_builder_or_highest_view_builder( - &self, - key: &BuilderStateId, - ) -> Result<&BroadcastSender>, GetChannelForMatchingBuilderError> { - if let Some(id_and_sender) = self.spawned_builder_states.get(key) { - tracing::info!("Got matching builder for parent {}", key); - Ok(&id_and_sender.1) - } else { - tracing::warn!( - "failed to recover builder for parent {}, using highest view num builder with {}", - key, - self.highest_view_num_builder_id, - ); - // get the sender for the highest view number builder - self.spawned_builder_states - .get(&self.highest_view_num_builder_id) - .map(|(_, sender)| sender) - .ok_or(GetChannelForMatchingBuilderError::NoBuilderStateFound) - } - } - - // check for the existence of the builder state for a view - pub fn check_builder_state_existence_for_a_view(&self, key: &Types::View) -> bool { - // iterate over the spawned builder states and check if the view number exists - self.spawned_builder_states - .iter() - .any(|(id, _)| id.parent_view == *key) - } - - pub fn should_view_handle_other_proposals( - &self, - builder_view: &Types::View, - proposal_view: &Types::View, - ) -> bool { - *builder_view == self.highest_view_num_builder_id.parent_view - && !self.check_builder_state_existence_for_a_view(proposal_view) - } -} - -pub struct ProxyGlobalState { - // global state - global_state: Arc>>, - - // identity keys for the builder - // May be ideal place as GlobalState interacts with hotshot apis - // and then can sign on responders as desired + coordinator: Arc>, builder_keys: ( Types::BuilderSignatureKey, // pub key <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, // private key ), - - // max waiting time to serve first api request max_api_waiting_time: Duration, + blocks: RwLock, BlockInfo>>, + block_cache: RwLock, BlockId>>, + block_size_limits: RwLock, + maximize_txn_capture_timeout: Duration, + num_nodes: AtomicUsize, + instance_state: Types::InstanceState, + base_fee: u64, } -impl ProxyGlobalState { - pub fn 
new( - global_state: Arc>>, - builder_keys: ( - Types::BuilderSignatureKey, - <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, - ), - max_api_waiting_time: Duration, - ) -> Self { - ProxyGlobalState { - global_state, - builder_keys, - max_api_waiting_time, - } +impl GlobalState +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, + for<'a> >::Error: Display, +{ + /// Spawns an event loop handling HotShot events from the provided stream. + /// Returns a handle for the spawned task. + pub fn start_event_loop( + &self, + event_stream: impl Stream> + Unpin + Send + 'static, + ) -> JoinHandle> { + async_compatibility_layer::art::async_spawn(Self::event_loop( + self.coordinator.clone(), + event_stream, + )) } -} - -/// `AvailableBlocksError` is an error enum that represents the class of possible -/// errors that can be returned when calling `available_blocks` on a -/// `ProxyGlobalState`. These errors are used for internal representations -/// for consistency and testing, and do not leak beyond the `ProxyGlobalState` -/// API. As such, they intentionally do not implement traits for serialization. -#[derive(Debug)] -enum AvailableBlocksError { - SignatureValidationFailed, - RequestForAvailableViewThatHasAlreadyBeenDecided, - SigningBlockFailed( - <::BuilderSignatureKey as BuilderSignatureKey>::SignError, - ), - GetChannelForMatchingBuilderError(GetChannelForMatchingBuilderError), - NoBlocksAvailable, - ChannelUnexpectedlyClosed, -} -impl From for AvailableBlocksError { - fn from(error: GetChannelForMatchingBuilderError) -> Self { - AvailableBlocksError::GetChannelForMatchingBuilderError(error) - } -} + /// Internal implementation of the event loop, drives the underlying coordinator + /// and runs hooks + async fn event_loop( + coordinator: Arc>, + mut event_stream: impl Stream> + Unpin + Send + 'static, + ) -> anyhow::Result<()> { + loop { + let Some(event) = event_stream.next().await else { + anyhow::bail!("Event stream ended"); + }; -impl From> for BuildError { - fn from(error: AvailableBlocksError) -> Self { - match error { - AvailableBlocksError::SignatureValidationFailed => { - BuildError::Error("Signature validation failed in get_available_blocks".to_string()) - } - AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided => { - BuildError::Error( - "Request for available blocks for a view that has already been decided." - .to_string(), - ) - } - AvailableBlocksError::SigningBlockFailed(e) => { - BuildError::Error(format!("Signing over block info failed: {:?}", e)) - } - AvailableBlocksError::GetChannelForMatchingBuilderError(e) => e.into(), - AvailableBlocksError::NoBlocksAvailable => { - BuildError::Error("No blocks available".to_string()) - } - AvailableBlocksError::ChannelUnexpectedlyClosed => { - BuildError::Error("Channel unexpectedly closed".to_string()) + match event.event { + EventType::Error { error } => { + tracing::error!("Error event in HotShot: {:?}", error); + } + EventType::Transactions { transactions } => { + // TODO: record results + let _ = transactions + .into_iter() + .map(|txn| { + coordinator.handle_transaction(ReceivedTransaction::new( + txn, + TransactionSource::Public, + )) + }) + .collect::>() + .collect::>() + .await; + } + EventType::Decide { leaf_chain, .. } => { + coordinator.handle_decide(leaf_chain).await; + } + EventType::DaProposal { proposal, .. } => { + coordinator.handle_da_proposal(proposal.data).await; + } + EventType::QuorumProposal { proposal, .. 
} => { + coordinator.handle_quorum_proposal(proposal.data).await; + } + _ => {} } } } -} -/// `ClaimBlockError` is an error enum that represents the class of possible -/// errors that can be returned when calling `claim_block` on a -/// `ProxyGlobalState`. These errors are used for internal representations -/// for consistency and testing, and do not leak beyond the `ProxyGlobalState` -/// API. As such, they intentionally do not implement traits for serialization. -#[derive(Debug)] -enum ClaimBlockError { - SignatureValidationFailed, - SigningCommitmentFailed( - <::BuilderSignatureKey as BuilderSignatureKey>::SignError, - ), - BlockDataNotFound, -} + /// Consumes `self` and returns a `tide_disco` [`App`] with builder and private mempool APIs registered + pub fn into_app( + self: Arc, + ) -> Result, BuilderApiError>, AppError> { + let proxy = ProxyGlobalState(self); + let builder_api = define_api::, Types>(&Default::default())?; -impl From> for BuildError { - fn from(error: ClaimBlockError) -> Self { - match error { - ClaimBlockError::SignatureValidationFailed => { - BuildError::Error("Signature validation failed in claim block".to_string()) - } - ClaimBlockError::SigningCommitmentFailed(e) => { - BuildError::Error(format!("Signing over builder commitment failed: {:?}", e)) - } - ClaimBlockError::BlockDataNotFound => { - BuildError::Error("Block data not found".to_string()) - } - } - } -} + // TODO: Replace StaticVersion with proper constant when added in HotShot + let private_mempool_api = + submit_api::, Types, StaticVersion<0, 1>>(&Default::default())?; -#[derive(Debug)] -enum ClaimBlockHeaderInputError { - SignatureValidationFailed, - BlockHeaderNotFound, - CouldNotGetVidInTime, - WaitAndKeepGetError(WaitAndKeepGetError), - FailedToSignVidCommitment( - <::BuilderSignatureKey as BuilderSignatureKey>::SignError, - ), - FailedToSignFeeInfo( - <::BuilderSignatureKey as BuilderSignatureKey>::SignError, - ), -} + let mut app: App, BuilderApiError> = App::with_state(proxy); -impl From> for BuildError { - fn from(error: ClaimBlockHeaderInputError) -> Self { - match error { - ClaimBlockHeaderInputError::SignatureValidationFailed => BuildError::Error( - "Signature validation failed in claim block header input".to_string(), - ), - ClaimBlockHeaderInputError::BlockHeaderNotFound => { - BuildError::Error("Block header not found".to_string()) - } - ClaimBlockHeaderInputError::CouldNotGetVidInTime => { - BuildError::Error("Couldn't get vid in time".to_string()) - } - ClaimBlockHeaderInputError::WaitAndKeepGetError(e) => e.into(), - ClaimBlockHeaderInputError::FailedToSignVidCommitment(e) => { - BuildError::Error(format!("Failed to sign VID commitment: {:?}", e)) - } - ClaimBlockHeaderInputError::FailedToSignFeeInfo(e) => { - BuildError::Error(format!("Failed to sign fee info: {:?}", e)) - } - } + app.register_module( + hotshot_types::constants::MARKETPLACE_BUILDER_MODULE, + builder_api, + )?; + + app.register_module("txn_submit", private_mempool_api)?; + + Ok(app) } } -impl ProxyGlobalState { - async fn available_blocks_implementation( +#[derive(derive_more::Deref, derive_more::DerefMut)] +#[deref(forward)] +#[deref_mut(forward)] +pub struct ProxyGlobalState(pub Arc>); + +/* +Handling Builder API responses +*/ +#[async_trait] +impl BuilderDataSource for ProxyGlobalState +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, + for<'a> >::Error: Display, +{ + async fn available_blocks( &self, for_parent: &VidCommitment, view_number: u64, sender: 
Types::SignatureKey, signature: &::PureAssembledSignatureType, - ) -> Result>, AvailableBlocksError> { + ) -> Result>, BuildError> { let starting_time = Instant::now(); let state_id = BuilderStateId { @@ -598,468 +222,265 @@ impl ProxyGlobalState { // verify the signature if !sender.validate(signature, state_id.parent_commitment.as_ref()) { tracing::error!("Signature validation failed in get_available_blocks"); - return Err(AvailableBlocksError::SignatureValidationFailed); + return Err(BuildError::Error( + "Signature verification failed".to_owned(), + )); } tracing::info!("Requesting available blocks for {state_id}",); let view_num = state_id.parent_view; - // check in the local spawned builder states - // if it doesn't exist; there are three cases - // 1) it has already been garbage collected (view < decide) and we should return an error - // 2) it has not yet been created, and we should try to wait - // 3) we missed the triggering event, and should use the BuilderState with the highest available view - - { - // 1st case: Decide event received, and not bootstrapping. - // If this `BlockBuilder` hasn't been reaped, it should have been. - let global_state = self.global_state.read_arc().await; - if view_num < global_state.last_garbage_collected_view_num - && global_state.highest_view_num_builder_id.parent_view - != global_state.last_garbage_collected_view_num - { - tracing::warn!( - "Requesting for view {:?}, last decide-triggered cleanup on view {:?}, highest view num is {:?}", - view_num, - global_state.last_garbage_collected_view_num, - global_state.highest_view_num_builder_id.parent_view - ); - return Err(AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided); - } - } - - let (response_sender, response_receiver) = unbounded(); - let req_msg = RequestMessage { - state_id: state_id.clone(), - response_channel: response_sender, - }; let timeout_after = starting_time + self.max_api_waiting_time; - let check_duration = self.max_api_waiting_time / 10; + let check_duration = self.max_api_waiting_time / 10; let time_to_wait_for_matching_builder = starting_time + self.max_api_waiting_time / 2; + let matching_builder = loop { + match self.coordinator.lookup_builder_state(&state_id).await { + BuilderStateLookup::Found(builder) => break Some(builder), + BuilderStateLookup::Decided => { + // TODO: + // tracing::warn!( + // "Requesting for view {:?}, last decide-triggered cleanup on view {:?}, highest view num is {:?}", + // view_num, + // todo!(), + // todo!() + // ); + return Err(BuildError::NotFound); + } + BuilderStateLookup::NotFound => { + if Instant::now() > time_to_wait_for_matching_builder { + break None; + } else { + async_sleep(check_duration).await; + continue; + } + } + }; + }; - let mut sent = false; - while Instant::now() < time_to_wait_for_matching_builder { - // try to broadcast the request to the correct builder state - let found_builder_state = { - let global_state_read_lock_guard = self.global_state.read_arc().await; + let builder = if let Some(matching) = matching_builder { + matching + } else if let Some(highest_view) = self.coordinator.highest_view_builder().await { + highest_view + } else if let Some(last_built_block) = self.block_cache.read().await.get(&state_id) { + todo!() + } else { + return Err(BuildError::NotFound); + }; - global_state_read_lock_guard - .spawned_builder_states - .get(&state_id) - .cloned() - }; + let timeout_after = Instant::now() + self.maximize_txn_capture_timeout; + let sleep_interval = self.maximize_txn_capture_timeout / 10; - if let 
Some(id_and_sender) = found_builder_state { - tracing::info!( - "Got matching BlockBuilder for {state_id}, sending get_available_blocks request", - ); + while Instant::now() <= timeout_after { + let queue_populated = builder.collect_txns(timeout_after).await; - if let Err(e) = id_and_sender - .1 - .broadcast(MessageType::RequestMessage(req_msg.clone())) - .await - { - tracing::warn!("Error {e} sending get_available_blocks request for {state_id}",); - } - sent = true; + if queue_populated || Instant::now() + sleep_interval > timeout_after { + // we don't have time for another iteration break; } - tracing::info!("Failed to get matching BlockBuilder for {state_id}, will try again",); - async_sleep(check_duration).await; + async_sleep(sleep_interval).await } - if !sent { - // broadcast the request to the best fallback builder state - if let Err(e) = self - .global_state - .read_arc() - .await - .get_channel_for_matching_builder_or_highest_view_builder(&state_id)? - .broadcast(MessageType::RequestMessage(req_msg.clone())) - .await - { - tracing::warn!( - "Error {e} sending get_available_blocks request for parent {state_id}", - ); - } + // TODO: + let should_prioritize_finalization = false; + // let should_prioritize_finalization = self + // .allow_empty_block_until + // .map(|until| state_id.parent_view < until) + // .unwrap_or(false); + + if builder.txn_queue.read().await.is_empty() && !should_prioritize_finalization { + // Don't build an empty block + return Ok(vec![]); } - tracing::debug!("Waiting for response for get_available_blocks with parent {state_id}",); + let max_block_size = self.block_size_limits.read().await.max_block_size; - let response_received = loop { - match async_timeout(check_duration, response_receiver.recv()).await { - Err(toe) => { - if Instant::now() >= timeout_after { - tracing::debug!(%toe, "Couldn't get available blocks in time for parent {state_id}"); - // lookup into the builder_state_to_last_built_block, if it contains the result, return that otherwise return error - if let Some(last_built_block) = self - .global_state - .read_arc() - .await - .builder_state_to_last_built_block - .get(&state_id) - { - tracing::info!("Returning last built block for parent {state_id}",); - break Ok(last_built_block.clone()); - } - break Err(AvailableBlocksError::NoBlocksAvailable); - } - continue; - } - Ok(recv_attempt) => { - if let Err(ref e) = recv_attempt { - tracing::error!(%e, "Channel closed while getting available blocks for parent {state_id}"); - } - break recv_attempt - .map_err(|_| AvailableBlocksError::ChannelUnexpectedlyClosed); + let transactions_to_include = builder + .txn_queue + .read() + .await + .transactions + .iter() + .scan(0, |total_size, tx| { + let prev_size = *total_size; + *total_size += tx.min_block_size; + // We will include one transaction over our target block length + // if it's the first transaction in queue, otherwise we'd have a possible failure + // state where a single transaction larger than target block state is stuck in + // queue and we just build empty blocks forever + if *total_size >= max_block_size && prev_size != 0 { + None + } else { + Some(tx.transaction.clone()) } - } - }; + }) + .collect::>(); - match response_received { - Ok(response) => { - let (pub_key, sign_key) = self.builder_keys.clone(); - // sign over the block info - let signature_over_block_info = - ::BuilderSignatureKey::sign_block_info( - &sign_key, - response.block_size, - response.offered_fee, - &response.builder_hash, - ) - 
.map_err(AvailableBlocksError::SigningBlockFailed)?; + let Ok((payload, metadata)) = + >::from_transactions( + transactions_to_include, + &builder.validated_state, + &self.instance_state, + ) + .await + else { + tracing::warn!("build block, returning None"); + return Err(BuildError::Error("Failed to build a block".to_string())); + }; - // insert the block info into local hashmap - let initial_block_info = AvailableBlockInfo:: { - block_hash: response.builder_hash.clone(), - block_size: response.block_size, - offered_fee: response.offered_fee, - signature: signature_over_block_info, - sender: pub_key.clone(), - _phantom: Default::default(), - }; - tracing::info!( - "Sending available Block info response for {state_id} with block hash: {:?}", - response.builder_hash - ); - Ok(vec![initial_block_info]) + let builder_hash = payload.builder_commitment(&metadata); + // count the number of txns + let actual_txn_count = payload.num_transactions(&metadata); + let truncated = actual_txn_count == 0; + + // Payload is empty despite us checking that tx_queue isn't empty earlier. + // + // This means that the block was truncated due to *sequencer* block length + // limits, which are different from our `max_block_size`. There's no good way + // for us to check for this in advance, so we detect transactions too big for + // the sequencer indirectly, by observing that we passed some transactions + // to `>::from_transactions`, but + // it returned an empty block. + // Thus we deduce that the first transaction in our queue is too big to *ever* + // be included, because it alone goes over sequencer's block size limit. + if truncated { + builder.txn_queue.write().await.pop_front(); + if !should_prioritize_finalization { + return Ok(vec![]); } + } + + let encoded_txns: Vec = payload.encode().to_vec(); + let block_size: u64 = encoded_txns.len() as u64; + let offered_fee: u64 = self.base_fee * block_size; + + // Get the number of nodes stored while processing the `claim_block_with_num_nodes` request + // or upon initialization. 
+ let num_nodes = self.num_nodes.load(Ordering::SeqCst); - // We failed to get available blocks - Err(e) => { - tracing::debug!("Failed to get available blocks for parent {state_id}",); - Err(e) + let fut = async move { + #[cfg(async_executor_impl = "tokio")] + { + let join_handle = tokio::task::spawn_blocking(move || { + precompute_vid_commitment(&encoded_txns, num_nodes) + }); + join_handle.await.unwrap() } - } - } + #[cfg(async_executor_impl = "async-std")] + { + let join_handle = async_std::task::spawn_blocking(move || { + precompute_vid_commitment(&encoded_txns, num_nodes) + }); + join_handle.await + } + }; + + tracing::info!( + "Builder view num {:?}, building block with {:?} txns, with builder hash {:?}", + builder.parent_block_references.view_number, + actual_txn_count, + builder_hash + ); + + let (pub_key, sign_key) = self.builder_keys.clone(); + // sign over the block info + let signature_over_block_info = ::BuilderSignatureKey::sign_block_info( + &sign_key, + block_size, + offered_fee, + &builder_hash, + ) + .map_err(|_| BuildError::Error("Failed to sign".to_owned()))?; - async fn claim_block_implementation( - &self, - block_hash: &BuilderCommitment, - view_number: u64, - sender: Types::SignatureKey, - signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, ClaimBlockError> { let block_id = BlockId { - hash: block_hash.clone(), + hash: payload.builder_commitment(&metadata), view: Types::View::new(view_number), }; + let info: BlockInfo = BlockInfo { + block_payload: payload, + metadata, + vid_data: WaitAndKeep::new(Box::pin(fut)), + offered_fee, + truncated, + }; - tracing::info!("Received request for claiming block {block_id}",); - // verify the signature - if !sender.validate(signature, block_id.hash.as_ref()) { - tracing::error!("Signature validation failed in claim block"); - return Err(ClaimBlockError::SignatureValidationFailed); - } - let (pub_key, sign_key) = self.builder_keys.clone(); - - let extracted_block_info_option = { - // We store this write lock guard separately to make it explicit - // that this will end up holding a lock for the duration of this - // closure. - // - // Additionally, we clone the properties from the block_info that - // end up being cloned if found anyway. Since we know this already - // we can perform the clone here to avoid holding the lock for - // longer than needed. 
-        let mut global_state_write_lock_guard = self.global_state.write_arc().await;
-        let block_info_some = global_state_write_lock_guard.blocks.get(&block_id);
+        self.blocks.write().await.put(block_id.clone(), info);
+        self.block_cache.write().await.insert(state_id, block_id);
-        block_info_some.map(|block_info| {
-            (
-                block_info.vid_trigger.clone(),
-                block_info.block_payload.clone(),
-                block_info.metadata.clone(),
-            )
-        })
+        let initial_block_info = AvailableBlockInfo::<Types> {
+            block_hash: builder_hash.clone(),
+            block_size,
+            offered_fee,
+            signature: signature_over_block_info,
+            sender: pub_key.clone(),
+            _phantom: Default::default(),
         };
-        if let Some((vid_trigger, block_payload, metadata)) = extracted_block_info_option {
-            tracing::info!("Trying sending vid trigger info for {block_id}",);
+        Ok(vec![initial_block_info])
+    }
-            if let Some(trigger_writer) = vid_trigger.write().await.take() {
-                tracing::info!("Sending vid trigger for {block_id}");
-                trigger_writer.send(TriggerStatus::Start);
-                tracing::info!("Sent vid trigger for {block_id}");
-            }
-            tracing::info!("Done Trying sending vid trigger info for {block_id}",);
+    async fn claim_block(
+        &self,
+        block_hash: &BuilderCommitment,
+        view_number: u64,
+        sender: Types::SignatureKey,
+        signature: &<<Types as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
+    ) -> Result<AvailableBlockData<Types>, BuildError> {
+        todo!()
+    }
-            // sign over the builder commitment, as the proposer can compute it based on the provided block_payload
-            // and the metadata
-            let response_block_hash = block_payload.builder_commitment(&metadata);
-            let signature_over_builder_commitment =
-                <Types as NodeType>::BuilderSignatureKey::sign_builder_message(
-                    &sign_key,
-                    response_block_hash.as_ref(),
-                )
-                .map_err(ClaimBlockError::SigningCommitmentFailed)?;
+    async fn claim_block_with_num_nodes(
+        &self,
+        block_hash: &BuilderCommitment,
+        view_number: u64,
+        sender: <Types as NodeType>::SignatureKey,
+        signature: &<<Types as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
+        num_nodes: usize,
+    ) -> Result<AvailableBlockData<Types>, BuildError> {
+        // Update the stored `num_nodes` with the given value, which will be used for VID computation.
+ self.num_nodes.store(num_nodes, Ordering::Relaxed); - let block_data = AvailableBlockData:: { - block_payload: block_payload.clone(), - metadata: metadata.clone(), - signature: signature_over_builder_commitment, - sender: pub_key.clone(), - }; - tracing::info!("Sending Claim Block data for {block_id}",); - Ok(block_data) - } else { - tracing::warn!("Claim Block not found"); - Err(ClaimBlockError::BlockDataNotFound) - } + self.claim_block(block_hash, view_number, sender, signature) + .await } - async fn claim_block_header_input_implementation( + async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, view_number: u64, sender: Types::SignatureKey, signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, ClaimBlockHeaderInputError> { - let id = BlockId { - hash: block_hash.clone(), - view: Types::View::new(view_number), - }; - - tracing::info!("Received request for claiming block header input for block {id}"); - // verify the signature - if !sender.validate(signature, id.hash.as_ref()) { - tracing::error!("Signature validation failed in claim block header input"); - return Err(ClaimBlockHeaderInputError::SignatureValidationFailed); - } - let (pub_key, sign_key) = self.builder_keys.clone(); + ) -> Result, BuildError> { + todo!() + } - let extracted_block_info_option = { - // We store this write lock guard separately to make it explicit - // that this will end up holding a lock for the duration of this - // closure. - // - // Additionally, we clone the properties from the block_info that - // end up being cloned if found anyway. Since we know this already - // we can perform the clone here to avoid holding the lock for - // longer than needed. - let mut global_state_write_lock_guard = self.global_state.write_arc().await; - let block_info_some = global_state_write_lock_guard.blocks.get(&id); + /// Returns the public key of the builder + async fn builder_address( + &self, + ) -> Result<::BuilderSignatureKey, BuildError> { + Ok(self.builder_keys.0.clone()) + } +} - block_info_some.map(|block_info| { - ( - block_info.vid_receiver.clone(), - block_info.metadata.clone(), - block_info.offered_fee, - block_info.truncated, - ) +#[async_trait] +impl AcceptsTxnSubmits for ProxyGlobalState { + async fn submit_txns( + &self, + txns: Vec<::Transaction>, + ) -> Result::Transaction>>, BuildError> { + txns.into_iter() + .map(|txn| ReceivedTransaction::new(txn, TransactionSource::Private)) + .map(|txn| async { + let commit = txn.commit; + self.coordinator + .handle_transaction(txn) + .await + .map(|_| commit) }) - }; - - if let Some((vid_receiver, metadata, offered_fee, truncated)) = extracted_block_info_option - { - tracing::info!("Waiting for vid commitment for block {id}"); - - let timeout_after = Instant::now() + self.max_api_waiting_time; - let check_duration = self.max_api_waiting_time / 10; - - let response_received = loop { - match async_timeout(check_duration, vid_receiver.write().await.get()).await { - Err(_toe) => { - if Instant::now() >= timeout_after { - tracing::warn!("Couldn't get vid commitment in time for block {id}",); - { - // we can't keep up with this block size, reduce max block size - self.global_state - .write_arc() - .await - .block_size_limits - .decrement_block_size(); - } - break Err(ClaimBlockHeaderInputError::CouldNotGetVidInTime); - } - continue; - } - Ok(recv_attempt) => { - if recv_attempt.is_err() { - tracing::error!( - "Channel closed while getting vid commitment for block {id}", - ); - } - break recv_attempt - 
.map_err(ClaimBlockHeaderInputError::WaitAndKeepGetError); - } - } - }; - - tracing::info!("Got vid commitment for block {id}",); - - // We got VID in time with margin left. - // Maybe we can handle bigger blocks? - if timeout_after.duration_since(Instant::now()) - > self.max_api_waiting_time / VID_RESPONSE_TARGET_MARGIN_DIVISOR - { - // Increase max block size - self.global_state - .write_arc() - .await - .block_size_limits - .try_increment_block_size(truncated); - } - - match response_received { - Ok((vid_commitment, vid_precompute_data)) => { - // sign over the vid commitment - let signature_over_vid_commitment = - ::BuilderSignatureKey::sign_builder_message( - &sign_key, - vid_commitment.as_ref(), - ) - .map_err(ClaimBlockHeaderInputError::FailedToSignVidCommitment)?; - - let signature_over_fee_info = Types::BuilderSignatureKey::sign_fee( - &sign_key, - offered_fee, - &metadata, - &vid_commitment, - ) - .map_err(ClaimBlockHeaderInputError::FailedToSignFeeInfo)?; - - let response = AvailableBlockHeaderInput:: { - vid_commitment, - vid_precompute_data, - fee_signature: signature_over_fee_info, - message_signature: signature_over_vid_commitment, - sender: pub_key.clone(), - }; - tracing::info!("Sending Claim Block Header Input response for {id}",); - Ok(response) - } - Err(err) => { - tracing::warn!("Claim Block Header Input not found"); - Err(err) - } - } - } else { - tracing::warn!("Claim Block Header Input not found"); - Err(ClaimBlockHeaderInputError::BlockHeaderNotFound) - } - } -} - -/* -Handling Builder API responses -*/ -#[async_trait] -impl BuilderDataSource for ProxyGlobalState -where - for<'a> <::PureAssembledSignatureType as TryFrom< - &'a TaggedBase64, - >>::Error: Display, - for<'a> >::Error: Display, -{ - async fn available_blocks( - &self, - for_parent: &VidCommitment, - view_number: u64, - sender: Types::SignatureKey, - signature: &::PureAssembledSignatureType, - ) -> Result>, BuildError> { - Ok(self - .available_blocks_implementation(for_parent, view_number, sender, signature) - .await?) - } - - async fn claim_block( - &self, - block_hash: &BuilderCommitment, - view_number: u64, - sender: Types::SignatureKey, - signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuildError> { - Ok(self - .claim_block_implementation(block_hash, view_number, sender, signature) - .await?) - } - - async fn claim_block_with_num_nodes( - &self, - block_hash: &BuilderCommitment, - view_number: u64, - sender: ::SignatureKey, - signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - num_nodes: usize, - ) -> Result, BuildError> { - // Update the stored `num_nodes` with the given value, which will be used for VID computation. - self.global_state.write_arc().await.num_nodes = num_nodes; - - self.claim_block(block_hash, view_number, sender, signature) - .await - } - - async fn claim_block_header_input( - &self, - block_hash: &BuilderCommitment, - view_number: u64, - sender: Types::SignatureKey, - signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> Result, BuildError> { - Ok(self - .claim_block_header_input_implementation(block_hash, view_number, sender, signature) - .await?) 
- } - - /// Returns the public key of the builder - async fn builder_address( - &self, - ) -> Result<::BuilderSignatureKey, BuildError> { - Ok(self.builder_keys.0.clone()) - } -} - -#[async_trait] -impl AcceptsTxnSubmits for ProxyGlobalState { - async fn submit_txns( - &self, - txns: Vec<::Transaction>, - ) -> Result::Transaction>>, BuildError> { - tracing::debug!( - "Submitting {:?} transactions to the builder states{:?}", - txns.len(), - txns.iter().map(|txn| txn.commit()).collect::>() - ); - let response = self - .global_state - .read_arc() + .collect::>() + .try_collect() .await - .submit_client_txns(txns) - .await; - - tracing::debug!( - "Transaction submitted to the builder states, sending response: {:?}", - response - ); - - // NOTE: ideally we want to respond with original Vec - // instead of Result not to loose any information, - // but this requires changes to builder API - response.into_iter().collect() } } #[async_trait] @@ -1074,3489 +495,4377 @@ impl ReadState for ProxyGlobalState { } } -/* -Running Non-Permissioned Builder Service -*/ -pub async fn run_non_permissioned_standalone_builder_service< - Types: NodeType, - Ver: StaticVersionType, - S: Stream> + Unpin, ->( - // sending a DA proposal from the hotshot to the builder states - da_sender: BroadcastSender>, - - // sending a Quorum proposal from the hotshot to the builder states - quorum_sender: BroadcastSender>, - - // sending a Decide event from the hotshot to the builder states - decide_sender: BroadcastSender>, - - // HotShot event stream - hotshot_event_stream: S, - - // Global state - global_state: Arc>>, -) -> Result<(), anyhow::Error> { - let tx_sender = { - // This closure is likely unnecessary, but we want to play it safe - // with our RWLocks. - let global_state_read_lock_guard = global_state.read_arc().await; - global_state_read_lock_guard.tx_sender.clone() - }; - let mut hotshot_event_stream = std::pin::pin!(hotshot_event_stream); - - loop { - let Some(event) = hotshot_event_stream.next().await else { - anyhow::bail!("Event stream ended"); - }; - - match event.event { - EventType::Error { error } => { - tracing::error!("Error event in HotShot: {:?}", error); - } - // tx event - EventType::Transactions { transactions } => { - let max_block_size = { - // This closure is likely unnecessary, but we want - // to play it safe with our RWLocks. - let global_state_read_lock_guard = global_state.read_arc().await; - global_state_read_lock_guard - .block_size_limits - .max_block_size - }; - - handle_received_txns( - &tx_sender, - transactions, - TransactionSource::HotShot, - max_block_size, - ) - .await; - } - // decide event - EventType::Decide { - block_size: _, - leaf_chain, - qc: _, - } => { - let latest_decide_view_num = leaf_chain[0].leaf.view_number(); - handle_decide_event(&decide_sender, latest_decide_view_num).await; - } - // DA proposal event - EventType::DaProposal { proposal, sender } => { - handle_da_event(&da_sender, Arc::new(proposal), sender).await; - } - // QC proposal event - EventType::QuorumProposal { proposal, sender } => { - // get the leader for current view - handle_quorum_event(&quorum_sender, Arc::new(proposal), sender).await; - } - _ => { - tracing::debug!("Unhandled event from Builder"); - } - } - } -} - -/// [`HandleDaEventError`] represents the internal class of errors that can -/// occur when attempting to process an incoming da proposal event. More -/// specifically these are the class of error that can be returned from -/// [`handle_da_event_implementation`]. 
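A minimal, self-contained sketch of the signature check that these DA handlers perform may help here; `MockKey` and `DaProposalBytes` are hypothetical stand-ins for `Types::SignatureKey` and `DaProposal<Types>`:

use sha2::{Digest, Sha256};

// Hypothetical stand-in for `Types::SignatureKey`; the real `validate` has the
// same shape: (signature, data) -> bool.
struct MockKey;
impl MockKey {
    fn validate(&self, _signature: &[u8], _data: &[u8]) -> bool {
        true
    }
}

// Hypothetical stand-in for `DaProposal<Types>`.
struct DaProposalBytes {
    encoded_transactions: Vec<u8>,
}

fn validate_da_proposal(sender: &MockKey, signature: &[u8], proposal: &DaProposalBytes) -> bool {
    // As in the real handler: the leader's signature is checked against the
    // SHA-256 digest of the encoded transactions, not against the raw bytes.
    let encoded_txns_hash = Sha256::digest(&proposal.encoded_transactions);
    sender.validate(signature, encoded_txns_hash.as_slice())
}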
-#[derive(Debug)] -enum HandleDaEventError { - SignatureValidationFailed, - BroadcastFailed(async_broadcast::SendError>), -} - -/// [`handle_da_event`] is a utility function that will attempt to broadcast the -/// given `da_proposal` to the given `da_channel_sender` if the given details -/// pass validation checks, and the [`BroadcastSender`] `da_channel_sender` is -/// still open. -async fn handle_da_event( - da_channel_sender: &BroadcastSender>, - da_proposal: Arc>>, - sender: ::SignatureKey, -) { - // We're explicitly not inspecting this error, as this function is not - // expected to return an error or any indication of an error. - let _ = handle_da_event_implementation(da_channel_sender, da_proposal, sender).await; -} - -/// [`handle_da_event_implementation`] is a utility function that will attempt -/// to broadcast the given `da_proposal` to the given `da_channel_sender` if the -/// given details pass all relevant checks. -/// -/// There are only three conditions under which this will fail to send the -/// message via the given `da_channel_sender`, and they are all represented -/// via [`HandleDaEventError`]. They are as follows: -/// - [`HandleDaEventError::SignatureValidationFailed`]: The signature validation failed -/// - [`HandleDaEventError::BroadcastFailed`]: The broadcast failed as no receiver -/// is in place to receive the message -/// -/// This function is the implementation for [`handle_da_event`]. -async fn handle_da_event_implementation( - da_channel_sender: &BroadcastSender>, - da_proposal: Arc>>, - sender: ::SignatureKey, -) -> Result<(), HandleDaEventError> { - tracing::debug!( - "DaProposal: Leader: {:?} for the view: {:?}", - sender, - da_proposal.data.view_number - ); - - // get the encoded transactions hash - let encoded_txns_hash = Sha256::digest(&da_proposal.data.encoded_transactions); - // check if the sender is the leader and the signature is valid; if yes, broadcast the DA proposal - - if !sender.validate(&da_proposal.signature, &encoded_txns_hash) { - tracing::error!( - "Validation Failure on DaProposal for view {:?}: Leader: {:?}", - da_proposal.data.view_number, - sender - ); - return Err(HandleDaEventError::SignatureValidationFailed); - } - - let da_msg = DaProposalMessage:: { - proposal: da_proposal, - sender, - }; - - let view_number = da_msg.proposal.data.view_number; - tracing::debug!( - "Sending DA proposal to the builder states for view {:?}", - view_number - ); - - if let Err(e) = da_channel_sender - .broadcast(MessageType::DaProposalMessage(da_msg)) - .await - { - tracing::warn!( - "Error {e}, failed to send DA proposal to builder states for view {:?}", - view_number - ); - - return Err(HandleDaEventError::BroadcastFailed(e)); - } - - Ok(()) -} - -/// [`HandleQuorumEventError`] represents the internal class of errors that can -/// occur when attempting to process an incoming quorum proposal event. More -/// specifically these are the class of error that can be returned from -/// [`handle_quorum_event_implementation`]. -#[derive(Debug)] -enum HandleQuorumEventError { - SignatureValidationFailed, - BroadcastFailed(async_broadcast::SendError>), -} - -/// [`handle_quorum_event`] is a utility function that will attempt to broadcast the -/// given `quorum_proposal` to the given `quorum_channel_sender` if the given details -/// pass validation checks, and the [`BroadcastSender`] `quorum_channel_sender` is -/// still open. 
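The split between `handle_quorum_event` and its `_implementation` counterpart is a deliberate pattern: the wrapper swallows the error so the event loop never branches on it, while the implementation returns it so tests can assert on the failure mode. A minimal sketch of the pattern, with hypothetical names:

#[derive(Debug)]
enum HandleEventError {
    SignatureValidationFailed,
}

// Fallible implementation: returns the error so tests can inspect it.
async fn handle_event_implementation(signature_valid: bool) -> Result<(), HandleEventError> {
    if !signature_valid {
        return Err(HandleEventError::SignatureValidationFailed);
    }
    // ... broadcast the message to the builder states here ...
    Ok(())
}

// Fire-and-forget wrapper: callers deliberately ignore the result.
async fn handle_event(signature_valid: bool) {
    let _ = handle_event_implementation(signature_valid).await;
}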
-async fn handle_quorum_event( - quorum_channel_sender: &BroadcastSender>, - quorum_proposal: Arc>>, - sender: ::SignatureKey, -) { - // We're explicitly not inspecting this error, as this function is not - // expected to return an error or any indication of an error. - let _ = - handle_quorum_event_implementation(quorum_channel_sender, quorum_proposal, sender).await; -} - -/// Utility function that will attempt to broadcast the given `quorum_proposal` -/// to the given `quorum_channel_sender` if the given details pass all relevant checks. -/// -/// There are only three conditions under which this will fail to send the -/// message via the given `quorum_channel_sender`, and they are all represented -/// via [`HandleQuorumEventError`]. They are as follows: -/// - [`HandleQuorumEventError::SignatureValidationFailed`]: The signature validation failed -/// - [`HandleQuorumEventError::BroadcastFailed`]: The broadcast failed as no receiver -/// is in place to receive the message -/// -/// This function is the implementation for [`handle_quorum_event`]. -async fn handle_quorum_event_implementation( - quorum_channel_sender: &BroadcastSender>, - quorum_proposal: Arc>>, - sender: ::SignatureKey, -) -> Result<(), HandleQuorumEventError> { - tracing::debug!( - "QuorumProposal: Leader: {:?} for the view: {:?}", - sender, - quorum_proposal.data.view_number - ); - - let leaf = Leaf::from_quorum_proposal(&quorum_proposal.data); - - if !sender.validate(&quorum_proposal.signature, leaf.legacy_commit().as_ref()) { - tracing::error!( - "Validation Failure on QuorumProposal for view {:?}: Leader for the current view: {:?}", - quorum_proposal.data.view_number, - sender - ); - return Err(HandleQuorumEventError::SignatureValidationFailed); - } - - let quorum_msg = QuorumProposalMessage:: { - proposal: quorum_proposal, - sender, - }; - let view_number = quorum_msg.proposal.data.view_number; - tracing::debug!( - "Sending Quorum proposal to the builder states for view {:?}", - view_number - ); - - if let Err(e) = quorum_channel_sender - .broadcast(MessageType::QuorumProposalMessage(quorum_msg)) - .await - { - tracing::warn!( - "Error {e}, failed to send Quorum proposal to builder states for view {:?}", - view_number - ); - return Err(HandleQuorumEventError::BroadcastFailed(e)); - } - - Ok(()) -} - -async fn handle_decide_event( - decide_channel_sender: &BroadcastSender>, - latest_decide_view_number: Types::View, -) { - let decide_msg: DecideMessage = DecideMessage:: { - latest_decide_view_number, - }; - tracing::debug!( - "Sending Decide event to builder states for view {:?}", - latest_decide_view_number - ); - if let Err(e) = decide_channel_sender - .broadcast(MessageType::DecideMessage(decide_msg)) - .await - { - tracing::warn!( - "Error {e}, failed to send Decide event to builder states for view {:?}", - latest_decide_view_number - ); - } -} - -#[derive(Debug)] -enum HandleReceivedTxnsError { - TransactionTooBig { - estimated_length: u64, - max_txn_len: u64, - }, - - TooManyTransactions, - - Internal(TrySendError>>), -} - -impl From> for BuildError { - fn from(error: HandleReceivedTxnsError) -> Self { - match error { - HandleReceivedTxnsError::TransactionTooBig { - estimated_length, - max_txn_len, - } => BuildError::Error(format!("Transaction too big (estimated length {estimated_length}, currently accepting <= {max_txn_len})")), - HandleReceivedTxnsError::TooManyTransactions => BuildError::Error("Too many transactions".to_owned()), - HandleReceivedTxnsError::Internal(err) => BuildError::Error(format!("Internal 
error when submitting transaction: {}", err)),
-        }
-    }
-}
-
-impl<Types: NodeType> From<TrySendError<Arc<ReceivedTransaction<Types>>>>
-    for HandleReceivedTxnsError<Types>
-{
-    fn from(err: TrySendError<Arc<ReceivedTransaction<Types>>>) -> Self {
-        match err {
-            TrySendError::Full(_) => HandleReceivedTxnsError::TooManyTransactions,
-            err => HandleReceivedTxnsError::Internal(err),
-        }
-    }
-}
-
-/// Utility function that will take the given list
-/// of transactions, `txns`, wrap them in a [`ReceivedTransaction`] struct,
-/// and attempt to broadcast them to the given transaction [`BroadcastSender`]
-/// `tx_sender`. The broadcast itself is a non-blocking operation, and any
-/// failures of the broadcast are collected into the returned vector
-/// of [Result]s.
-///
-/// There is also a `max_txn_len` parameter that is used to check to ensure
-/// that transactions that exceed this threshold will also not be broadcasted.
-pub(crate) async fn handle_received_txns<Types: NodeType>(
-    tx_sender: &BroadcastSender<Arc<ReceivedTransaction<Types>>>,
-    txns: Vec<Types::Transaction>,
-    source: TransactionSource,
-    max_txn_len: u64,
-) -> Vec<Result<Commitment<<Types as NodeType>::Transaction>, BuildError>> {
-    HandleReceivedTxns::new(tx_sender.clone(), txns, source, max_txn_len)
-        .map(|res| res.map_err(Into::into))
-        .collect()
-}
-
-/// `HandleReceivedTxns` is a struct that is used to handle the processing of
-/// the function [`handle_received_txns`]. In order to avoid the need to
-/// double allocate a [Vec] from processing these entries, this struct exists
-/// to be processed as an [Iterator] instead.
-struct HandleReceivedTxns<Types: NodeType> {
-    tx_sender: BroadcastSender<Arc<ReceivedTransaction<Types>>>,
-    txns: Vec<Types::Transaction>,
-    source: TransactionSource,
-    max_txn_len: u64,
-    offset: usize,
-    txns_length: usize,
-    time_in: Instant,
-}
-
-impl<Types: NodeType> HandleReceivedTxns<Types> {
-    fn new(
-        tx_sender: BroadcastSender<Arc<ReceivedTransaction<Types>>>,
-        txns: Vec<Types::Transaction>,
-        source: TransactionSource,
-        max_txn_len: u64,
-    ) -> Self {
-        let txns_length = txns.len();
-
-        Self {
-            tx_sender,
-            txns,
-            source,
-            max_txn_len,
-            offset: 0,
-            txns_length,
-            time_in: Instant::now(),
-        }
-    }
-}
-
-impl<Types: NodeType> Iterator for HandleReceivedTxns<Types>
-where
-    Types::Transaction: Transaction,
-{
-    type Item =
-        Result<Commitment<<Types as NodeType>::Transaction>, HandleReceivedTxnsError<Types>>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.txns.is_empty() {
-            return None;
-        }
-
-        if self.offset >= self.txns_length {
-            return None;
-        }
-
-        let offset = self.offset;
-        // increment the offset so we can ensure we're making progress;
-        self.offset += 1;
-
-        let tx = self.txns[offset].clone();
-        let commit = tx.commit();
-        // This is a rough estimate, but we don't have any other way to get real
-        // encoded transaction length. Luckily, this being roughly proportional
-        // to encoded length is enough, because we only use this value to estimate
-        // our limitations on computing the VID in time.
- let len = tx.minimum_block_size(); - let max_txn_len = self.max_txn_len; - if len > max_txn_len { - tracing::warn!(%commit, %len, %max_txn_len, "Transaction too big"); - return Some(Err(HandleReceivedTxnsError::TransactionTooBig { - estimated_length: len, - max_txn_len: self.max_txn_len, - })); - } - - let res = self - .tx_sender - .try_broadcast(Arc::new(ReceivedTransaction { - tx, - source: self.source.clone(), - commit, - time_in: self.time_in, - len, - })) - .inspect(|val| { - if let Some(evicted_txn) = val { - tracing::warn!( - "Overflow mode enabled, transaction {} evicted", - evicted_txn.commit - ); - } - }) - .map(|_| commit) - .inspect_err(|err| { - tracing::warn!("Failed to broadcast txn with commit {:?}: {}", commit, err); - }) - .map_err(HandleReceivedTxnsError::from); - - Some(res) - } - - fn size_hint(&self) -> (usize, Option) { - ( - self.txns_length - self.offset, - Some(self.txns.capacity() - self.offset), - ) - } -} - -#[cfg(test)] -mod test { - use std::{sync::Arc, time::Duration}; - - use async_compatibility_layer::channel::unbounded; - use async_lock::RwLock; - use committable::Commitment; - use futures::StreamExt; - use hotshot::{ - traits::BlockPayload, - types::{BLSPubKey, SignatureKey}, - }; - use hotshot_builder_api::v0_2::block_info::AvailableBlockInfo; - use hotshot_example_types::{ - block_types::{TestBlockPayload, TestMetadata, TestTransaction}, - node_types::{TestTypes, TestVersions}, - state_types::{TestInstanceState, TestValidatedState}, - }; - use hotshot_types::{ - data::{DaProposal, Leaf, QuorumProposal, ViewNumber}, - message::Proposal, - simple_certificate::QuorumCertificate, - traits::{ - block_contents::{precompute_vid_commitment, vid_commitment}, - node_implementation::ConsensusTime, - signature_key::BuilderSignatureKey, - }, - utils::BuilderCommitment, - }; - use marketplace_builder_shared::{ - block::{BlockId, BuilderStateId, ParentBlockReferences}, - testing::constants::{ - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_NUM_NODES_IN_VID_COMPUTATION, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - }, - }; - use sha2::{Digest, Sha256}; - - use crate::{ - builder_state::{ - BuildBlockInfo, MessageType, RequestMessage, ResponseMessage, TransactionSource, - TriggerStatus, - }, - service::{BlockSizeLimits, HandleReceivedTxnsError}, - LegacyCommit, - }; - - use super::{ - handle_da_event_implementation, handle_quorum_event_implementation, AvailableBlocksError, - BlockInfo, ClaimBlockError, ClaimBlockHeaderInputError, GlobalState, HandleDaEventError, - HandleQuorumEventError, HandleReceivedTxns, ProxyGlobalState, - }; - - /// A const number on `max_tx_len` to be used consistently spanning all the tests - /// It is set to 1 as current estimation on `TestTransaction` is 1 - const TEST_MAX_TX_LEN: u64 = 1; - - // GlobalState Tests - - // GlobalState::new Tests - - /// This test checks a [GlobalState] created from [GlobalState::new] has - /// the appropriate values stored within it. 
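An aside before the tests: the `HandleReceivedTxns` iterator above exists so the caller can `collect()` straight into the returned `Vec`, allocating the output once instead of building an intermediate vector. A simplified, self-contained sketch of the same shape (names and the byte-length check are illustrative):

// Yields one Result per queued item, checking a size limit as it goes.
struct ProcessAll<T> {
    items: Vec<T>,
    max_len: usize,
    offset: usize,
}

impl<T: Clone + AsRef<[u8]>> Iterator for ProcessAll<T> {
    type Item = Result<T, String>;

    fn next(&mut self) -> Option<Self::Item> {
        let item = self.items.get(self.offset)?.clone();
        // Increment the offset so the iterator always makes progress.
        self.offset += 1;
        // Per-item gate, mirroring the `max_txn_len` check above.
        if item.as_ref().len() > self.max_len {
            return Some(Err("transaction too big".to_string()));
        }
        Some(Ok(item))
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.items.len() - self.offset;
        (remaining, Some(remaining))
    }
}

// Usage: a single collect() allocates the output vector exactly once.
fn process(txns: Vec<Vec<u8>>) -> Vec<Result<Vec<u8>, String>> {
    ProcessAll { items: txns, max_len: 1024, offset: 0 }.collect()
}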
- #[async_std::test] - async fn test_global_state_new() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - let state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(1), - ViewNumber::new(2), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - assert_eq!(state.blocks.len(), 0, "The blocks LRU should be empty"); - - let builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(1), - }; - - // There should be a single entry within the spawned_builder_states, - // and it should be the one that was just created. - assert_eq!( - state.spawned_builder_states.len(), - 1, - "There should be a single entry in the spawned builder states hashmap" - ); - - assert!(state.spawned_builder_states.contains_key(&builder_state_id), "The spawned builder states should contain an entry with the bootstrapped parameters passed into new"); - - assert!(!state.spawned_builder_states.contains_key(&BuilderStateId { parent_commitment: parent_commit, parent_view: ViewNumber::new(0) }), "The spawned builder states should not contain any other entry, as such it should not contain any entry with a higher view number, but the same parent commit"); - - // We can't compare the Senders directly - - assert_eq!( - state.last_garbage_collected_view_num, - ViewNumber::new(2), - "The last garbage collected view number should be the one passed into new" - ); - - assert_eq!( - state.builder_state_to_last_built_block.len(), - 0, - "The builder state to last built block should be empty" - ); - - assert_eq!( - state.highest_view_num_builder_id, builder_state_id, - "The highest view number builder id should be the bootstrapped build state id" - ); - - assert_eq!( - state.block_size_limits.protocol_max_block_size, TEST_PROTOCOL_MAX_BLOCK_SIZE, - "The protocol max block size should be the one passed into new" - ); - - assert_eq!( - state.block_size_limits.max_block_size, state.block_size_limits.protocol_max_block_size, - "The max block size should be initialized to protocol max block size" - ); - } - - // GlobalState::register_builder_state Tests - - /// This test checks that the [GlobalState::register_builder_state] function - /// will correctly register a new builder state, and that the highest view - /// number builder id will be updated to the new builder state id. - /// Additionally, it will check that the spawned builder states hashmap - /// will contain the new builder state id. 
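The bookkeeping these registration tests exercise reduces to two rules: a sender is stored under its `BuilderStateId`, and the highest-view marker only ever moves forward. A distilled sketch under those assumptions, with `String` standing in for the broadcast sender:

use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct StateId {
    view: u64,
}

struct Registry {
    spawned: HashMap<StateId, String>,
    highest_view_id: StateId,
}

impl Registry {
    fn register(&mut self, id: StateId, sender: String) {
        // Inserting under an existing id replaces (and drops) the old sender.
        self.spawned.insert(id.clone(), sender);
        // The highest-view marker never decreases.
        if id.view > self.highest_view_id.view {
            self.highest_view_id = id;
        }
    }
}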
- #[async_std::test] - async fn test_global_state_register_builder_state_different_states() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - let mut state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - { - let (req_sender, _) = async_broadcast::broadcast(10); - let builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(5), - }; - - state.register_builder_state( - builder_state_id.clone(), - ParentBlockReferences { - view_number: ViewNumber::new(5), - vid_commitment: parent_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - req_sender.clone(), - ); - - assert_eq!( - state.spawned_builder_states.len(), - 2, - "The spawned_builder_states should now have 2 elements in it" - ); - assert_eq!( - state.highest_view_num_builder_id, builder_state_id, - "The highest view number builder id should now be the one that was just registered" - ); - assert!( - state.spawned_builder_states.contains_key(&builder_state_id), - "The spawned builder states should contain the new builder state id" - ); - }; - - { - let (req_sender, _) = async_broadcast::broadcast(10); - let builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(6), - }; - - state.register_builder_state( - builder_state_id.clone(), - ParentBlockReferences { - view_number: ViewNumber::new(6), - vid_commitment: parent_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - req_sender.clone(), - ); - - assert_eq!( - state.spawned_builder_states.len(), - 3, - "The spawned_builder_states should now have 3 elements in it" - ); - assert_eq!( - state.highest_view_num_builder_id, builder_state_id, - "The highest view number builder id should now be the one that was just registered" - ); - assert!( - state.spawned_builder_states.contains_key(&builder_state_id), - "The spawned builder states should contain the new builder state id" - ); - }; - } - - /// This test checks that the register_builder_state method will overwrite - /// the previous sender in the `spawned_builder_states` hashmap if the same - /// `BuilderStateId` is used to register a new sender. - /// - /// It also demonstrates that doing this will drop the previous sender, - /// effectively closing it if it is the only reference to it. 
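The channel-closing behavior the next test depends on comes directly from `async_broadcast`: once the last `Sender` is dropped and the buffer is drained, `recv` returns an error. A minimal standalone sketch:

#[async_std::main]
async fn main() {
    let (tx, mut rx) = async_broadcast::broadcast::<u32>(10);
    tx.broadcast(1).await.unwrap();
    drop(tx); // the last sender is dropped, closing the channel

    // A message buffered before the close is still delivered...
    assert_eq!(rx.recv().await.unwrap(), 1);
    // ...after which the closed, empty channel reports an error.
    assert!(rx.recv().await.is_err());
}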
- #[async_std::test] - async fn test_global_state_register_builder_state_same_builder_state_id() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - let mut state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - let mut req_receiver_1 = { - let (req_sender, req_receiver) = async_broadcast::broadcast(10); - let builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(5), - }; - - state.register_builder_state( - builder_state_id.clone(), - ParentBlockReferences { - view_number: ViewNumber::new(5), - vid_commitment: parent_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - req_sender.clone(), - ); - - assert_eq!( - state.spawned_builder_states.len(), - 2, - "The spawned_builder_states should now have 2 elements in it" - ); - assert_eq!( - state.highest_view_num_builder_id, builder_state_id, - "The highest view number builder id should now be the one that was just registered" - ); - - req_receiver - }; - - let mut req_receiver_2 = { - let (req_sender, req_receiver) = async_broadcast::broadcast(10); - let builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(5), - }; - - // This is the same BuilderStateId as the previous one, so it should - // replace the previous one. Which means that the previous one - // may no longer be published to. - state.register_builder_state( - builder_state_id.clone(), - ParentBlockReferences { - view_number: ViewNumber::new(5), - vid_commitment: parent_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - req_sender.clone(), - ); - - assert_eq!( - state.spawned_builder_states.len(), - 2, - "The spawned_builder_states should still have 2 elements in it" - ); - assert_eq!(state.highest_view_num_builder_id, builder_state_id, "The highest view number builder id should still be the one that was just registered"); - - req_receiver - }; - - { - let builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(5), - }; - - let req_id_and_sender = state.spawned_builder_states.get(&builder_state_id).unwrap(); - let (response_sender, _) = unbounded(); - - assert!( - req_id_and_sender - .1 - .broadcast(MessageType::RequestMessage(RequestMessage { - state_id: builder_state_id, - response_channel: response_sender, - })) - .await - .is_ok(), - "This should be able to send a Message through the sender" - ); - } - - // The first receiver should have been replaced, so we won't get any - // results from it. - - assert!( - req_receiver_1.recv().await.is_err(), - "This first receiver should be closed" - ); - assert!( - req_receiver_2.recv().await.is_ok(), - "The second receiver should receive a message" - ); - } - - /// This test checks that the register_builder_state method will only - /// update the highest_view_num_builder_id if the new [BuilderStateId] has - /// a higher view number than the current highest_view_num_builder_id. 
- #[async_std::test] - async fn test_global_state_register_builder_state_decrementing_builder_state_ids() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - let mut state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - { - let (req_sender, _) = async_broadcast::broadcast(10); - let builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(6), - }; - - state.register_builder_state( - builder_state_id.clone(), - ParentBlockReferences { - view_number: ViewNumber::new(6), - vid_commitment: parent_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - req_sender.clone(), - ); - - assert_eq!( - state.spawned_builder_states.len(), - 2, - "The spawned_builder_states should now have 2 elements in it" - ); - assert_eq!( - state.highest_view_num_builder_id, builder_state_id, - "The highest view number builder id should now be the one that was just registered" - ); - assert!( - state.spawned_builder_states.contains_key(&builder_state_id), - "The spawned builder states should contain the new builder state id" - ); - }; - - { - let (req_sender, _) = async_broadcast::broadcast(10); - let builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(5), - }; - - state.register_builder_state( - builder_state_id.clone(), - ParentBlockReferences { - view_number: ViewNumber::new(5), - vid_commitment: parent_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - req_sender.clone(), - ); - - assert_eq!( - state.spawned_builder_states.len(), - 3, - "The spawned_builder_states should now have 3 elements in it" - ); - assert_eq!( - state.highest_view_num_builder_id, - BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(6) - }, - "The highest view number builder id should now be the one that was just registered" - ); - assert!( - state.spawned_builder_states.contains_key(&builder_state_id), - "The spawned builder states should contain the new builder state id" - ); - }; - } - - // GlobalState::update_global_state Tests - - /// This test checks that the update_global_state method will correctly - /// update the LRU blocks cache and the builder_state_to_last_built_block - /// hashmap with values derived from the parameters passed into the method. - /// - /// The assumption behind this test is that the values being stored were - /// not being stored previously. 
- #[async_std::test] - async fn test_global_state_update_global_state_success() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - let mut state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - let new_parent_commit = vid_commitment(&[], 9); - let new_view_num = ViewNumber::new(1); - let builder_state_id = BuilderStateId { - parent_commitment: new_parent_commit, - parent_view: new_view_num, - }; - - let builder_hash_1 = BuilderCommitment::from_bytes([1, 2, 3, 4]); - let block_id = BlockId { - hash: builder_hash_1, - view: new_view_num, - }; - - let (vid_trigger_sender, vid_trigger_receiver) = - async_compatibility_layer::channel::oneshot(); - let (vid_sender, vid_receiver) = unbounded(); - let (block_payload, metadata) = - >::from_transactions( - vec![TestTransaction::new(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])], - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .unwrap(); - let offered_fee = 64u64; - let block_size = 64u64; - let truncated = false; - - let build_block_info = BuildBlockInfo { - id: block_id.clone(), - block_size, - offered_fee, - block_payload: block_payload.clone(), - metadata, - vid_trigger: vid_trigger_sender, - vid_receiver, - truncated, - }; - - let builder_hash_2 = BuilderCommitment::from_bytes([2, 3, 4, 5]); - let response_msg = ResponseMessage { - builder_hash: builder_hash_2.clone(), - block_size: 32, - offered_fee: 128, - }; - - // Now that every object is prepared and setup for storage, we can - // test the `update_global_state` method. - - // `update_global_state` has not return value from its method, so can - // only inspect its "success" based on the mutation of the state object. - state.update_global_state(builder_state_id.clone(), build_block_info, response_msg); - - // two things should be adjusted by `update_global_state`: - // - state.blocks - // - state.builder_state_to_last_built_block - - // start with blocks - - assert_eq!( - state.blocks.len(), - 1, - "The blocks LRU should have a single entry" - ); - - let retrieved_block_info = state.blocks.get(&block_id); - assert!( - retrieved_block_info.is_some(), - "Retrieval of the block id should result is a valid block info data" - ); - - let retrieved_block_info = retrieved_block_info.unwrap(); - - assert_eq!( - retrieved_block_info.block_payload, block_payload, - "The block payloads should match" - ); - assert_eq!( - retrieved_block_info.metadata, metadata, - "The metadata should match" - ); - assert_eq!( - retrieved_block_info.offered_fee, offered_fee, - "The offered fee should match" - ); - assert_eq!( - retrieved_block_info.truncated, truncated, - "The truncated flag should match" - ); - - { - // This ensures that the vid_trigger that is stored is still the - // same, or links to the vid_trigger_receiver that we submitted. 
- let mut vid_trigger_write_lock_guard = - retrieved_block_info.vid_trigger.write_arc().await; - if let Some(vid_trigger_sender) = vid_trigger_write_lock_guard.take() { - vid_trigger_sender.send(TriggerStatus::Start); - } - - match vid_trigger_receiver.recv().await { - Ok(TriggerStatus::Start) => { - // This is expected - } - _ => { - panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); - } - } - } - - { - // This ensures that the vid_sender that is stored is still the - // same, or links to the vid_receiver that we submitted. - let (vid_commitment, vid_precompute) = - precompute_vid_commitment(&[1, 2, 3, 4, 5], TEST_NUM_NODES_IN_VID_COMPUTATION); - assert_eq!( - vid_sender - .send((vid_commitment, vid_precompute.clone())) - .await, - Ok(()), - "The vid_sender should be able to send the vid commitment and precompute" - ); - - let mut vid_receiver_write_lock_guard = - retrieved_block_info.vid_receiver.write_arc().await; - - // Get and Keep object - - match vid_receiver_write_lock_guard.get().await { - Ok((received_vid_commitment, received_vid_precompute)) => { - assert_eq!( - received_vid_commitment, vid_commitment, - "The received vid commitment should match the expected vid commitment" - ); - assert_eq!( - received_vid_precompute, vid_precompute, - "The received vid precompute should match the expected vid precompute" - ); - } - _ => { - panic!("did not receive the expected vid commitment and precompute from vid_receiver_write_lock_guard"); - } - } - } - - // finish with builder_state_to_last_built_block - - assert_eq!( - state.builder_state_to_last_built_block.len(), - 1, - "The builder state to last built block should have a single entry" - ); - - let last_built_block = state - .builder_state_to_last_built_block - .get(&builder_state_id); - - assert!( - last_built_block.is_some(), - "The last built block should be retrievable" - ); - - let last_built_block = last_built_block.unwrap(); - - assert_eq!( - last_built_block.builder_hash, builder_hash_2, - "The last built block id should match the block id" - ); - - assert_eq!( - last_built_block.block_size, 32, - "The last built block size should match the response message" - ); - - assert_eq!( - last_built_block.offered_fee, 128, - "The last built block offered fee should match the response message" - ); - } - - /// This test demonstrates the replacement behavior of the the - /// `update_global_state` method. - /// - /// When given a `BuilderStateId` that already exists in the `blocks` LRU, - /// and the `builder_state_to_last_built_block` hashmap, the method will - /// replace the values in the `builder_state_to_last_built_block` hashmap, - /// and it will also replace the entry in the `block`s LRU. 
- #[async_std::test] - async fn test_global_state_update_global_state_replacement() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - let mut state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - let new_parent_commit = vid_commitment(&[], 9); - let new_view_num = ViewNumber::new(1); - let builder_state_id = BuilderStateId { - parent_commitment: new_parent_commit, - parent_view: new_view_num, - }; - - let builder_hash = BuilderCommitment::from_bytes([1, 2, 3, 4]); - let block_id_1 = BlockId { - hash: builder_hash.clone(), - view: new_view_num, - }; - let (vid_trigger_sender_1, vid_trigger_receiver_1) = - async_compatibility_layer::channel::oneshot(); - let (vid_sender_1, vid_receiver_1) = unbounded(); - let (block_payload_1, metadata_1) = - >::from_transactions( - vec![TestTransaction::new(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])], - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .unwrap(); - let offered_fee_1 = 64u64; - let block_size_1 = 64u64; - let truncated_1 = false; - let build_block_info_1 = BuildBlockInfo { - id: block_id_1.clone(), - block_size: block_size_1, - offered_fee: offered_fee_1, - block_payload: block_payload_1.clone(), - metadata: metadata_1, - vid_trigger: vid_trigger_sender_1, - vid_receiver: vid_receiver_1, - truncated: truncated_1, - }; - let response_msg_1 = ResponseMessage { - builder_hash: builder_hash.clone(), - block_size: block_size_1, - offered_fee: offered_fee_1, - }; - - // Now that every object is prepared and setup for storage, we can - // test the `update_global_state` method. - - // `update_global_state` has no return value from its method, so we can - // only inspect its "success" based on the mutation of the state object. - state.update_global_state(builder_state_id.clone(), build_block_info_1, response_msg_1); - - // We're going to enter another update_global_state_entry with the same - // builder_state_id, but with different values for the block info and - // response message. This should highlight that the values get replaced - // in this update. - - let block_id_2 = BlockId { - hash: builder_hash.clone(), - view: new_view_num, - }; - let (vid_trigger_sender_2, vid_trigger_receiver_2) = - async_compatibility_layer::channel::oneshot(); - let (vid_sender_2, vid_receiver_2) = unbounded(); - let (block_payload_2, metadata_2) = - >::from_transactions( - vec![TestTransaction::new(vec![2, 3, 4, 5, 6, 7, 8, 9, 10, 11])], - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .unwrap(); - let offered_fee_2 = 16u64; - let block_size_2 = 32u64; - let truncated_2 = true; - let build_block_info_2 = BuildBlockInfo { - id: block_id_2.clone(), - block_size: block_size_2, - offered_fee: offered_fee_2, - block_payload: block_payload_2.clone(), - metadata: metadata_2, - vid_trigger: vid_trigger_sender_2, - vid_receiver: vid_receiver_2, - truncated: truncated_2, - }; - let response_msg_2: ResponseMessage = ResponseMessage { - builder_hash: builder_hash.clone(), - block_size: block_size_2, - offered_fee: offered_fee_2, - }; - - // two things should be adjusted by `update_global_state`: - // When given the same build_state_ids. 
- state.update_global_state(builder_state_id.clone(), build_block_info_2, response_msg_2); - - // start with blocks - - assert_eq!( - state.blocks.len(), - 1, - "The blocks LRU should have a single entry" - ); - - let retrieved_block_info = state.blocks.get(&block_id_2); - assert!( - retrieved_block_info.is_some(), - "Retrieval of the block id should result is a valid block info data" - ); - - let retrieved_block_info = retrieved_block_info.unwrap(); - - assert_eq!( - retrieved_block_info.block_payload, block_payload_2, - "The block payloads should match" - ); - assert_ne!( - retrieved_block_info.block_payload, block_payload_1, - "The block payloads should not match" - ); - assert_eq!( - retrieved_block_info.metadata, metadata_2, - "The metadata should match" - ); - assert_eq!( - retrieved_block_info.metadata, metadata_1, - "The metadata should match" - ); - // TestMetadata will always match - - assert_eq!( - retrieved_block_info.offered_fee, offered_fee_2, - "The offered fee should match" - ); - assert_ne!( - retrieved_block_info.offered_fee, offered_fee_1, - "The offered fee should not match" - ); - assert_eq!( - retrieved_block_info.truncated, truncated_2, - "The truncated flag should match" - ); - assert_ne!( - retrieved_block_info.truncated, truncated_1, - "The truncated flag should not match" - ); - - { - // This ensures that the vid_trigger that is stored is still the - // same, or links to the vid_trigger_receiver that we submitted. - let mut vid_trigger_write_lock_guard = - retrieved_block_info.vid_trigger.write_arc().await; - if let Some(vid_trigger_sender) = vid_trigger_write_lock_guard.take() { - vid_trigger_sender.send(TriggerStatus::Start); - } - - match vid_trigger_receiver_2.recv().await { - Ok(TriggerStatus::Start) => { - // This is expected - } - _ => { - panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); - } - } - - assert!( - vid_trigger_receiver_1.recv().await.is_err(), - "This should not receive anything from vid_trigger_receiver_1" - ); - } - - { - // This ensures that the vid_sender that is stored is still the - // same, or links to the vid_receiver that we submitted. 
- let (vid_commitment, vid_precompute) = - precompute_vid_commitment(&[1, 2, 3, 4, 5], TEST_NUM_NODES_IN_VID_COMPUTATION); - assert_eq!( - vid_sender_2 - .send((vid_commitment, vid_precompute.clone())) - .await, - Ok(()), - "The vid_sender should be able to send the vid commitment and precompute" - ); - - assert!( - vid_sender_1 - .send((vid_commitment, vid_precompute.clone())) - .await - .is_err(), - "The vid_sender should not be able to send the vid commitment and precompute" - ); - - let mut vid_receiver_write_lock_guard = - retrieved_block_info.vid_receiver.write_arc().await; - - // Get and Keep object - - match vid_receiver_write_lock_guard.get().await { - Ok((received_vid_commitment, received_vid_precompute)) => { - assert_eq!( - received_vid_commitment, vid_commitment, - "The received vid commitment should match the expected vid commitment" - ); - assert_eq!( - received_vid_precompute, vid_precompute, - "The received vid precompute should match the expected vid precompute" - ); - } - _ => { - panic!("did not receive the expected vid commitment and precompute from vid_receiver_write_lock_guard"); - } - } - } - - // finish with builder_state_to_last_built_block - - assert_eq!( - state.builder_state_to_last_built_block.len(), - 1, - "The builder state to last built block should have a single entry" - ); - - let last_built_block = state - .builder_state_to_last_built_block - .get(&builder_state_id); - - assert!( - last_built_block.is_some(), - "The last built block should be retrievable" - ); - - let last_built_block = last_built_block.unwrap(); - - assert_eq!( - last_built_block.builder_hash, builder_hash, - "The last built block id should match the block id" - ); - - assert_eq!( - last_built_block.block_size, block_size_2, - "The last built block size should match the response message" - ); - assert_ne!( - last_built_block.block_size, block_size_1, - "The last built block size should not match the previous block size" - ); - - assert_eq!( - last_built_block.offered_fee, offered_fee_2, - "The last built block offered fee should match the response message" - ); - assert_ne!( - last_built_block.offered_fee, offered_fee_1, - "The last built block offered fee should not match the previous block offered fee" - ); - } - - // GlobalState::remove_handles Tests - - /// This test checks to ensure that remove_handles will only consider - /// views up to what is known to have been stored. As a result it will - /// indicate that is has only targeted to the highest view number that it - /// is aware of. - #[async_std::test] - async fn test_global_state_remove_handles_prune_up_to_latest() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); - let mut state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - // We register a few builder states. 
- for i in 1..=10 { - let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); - let view = ViewNumber::new(i as u64); - - state.register_builder_state( - BuilderStateId { - parent_commitment: vid_commit, - parent_view: view, - }, - ParentBlockReferences { - view_number: view, - vid_commitment: vid_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - async_broadcast::broadcast(10).0, - ); - } - - assert_eq!( - state.spawned_builder_states.len(), - 11, - "The spawned_builder_states should have the expected number of entries", - ); - - assert_eq!( - state.remove_handles(ViewNumber::new(100)), - ViewNumber::new(10), - "It should only be able to prune up to what has been stored" - ); - - assert_eq!( - state.spawned_builder_states.len(), - 1, - "The spawned_builder_states should only have a single entry in it" - ); - - let builder_state_id = BuilderStateId { - parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), - parent_view: ViewNumber::new(10), - }; - assert_eq!( - state.highest_view_num_builder_id, builder_state_id, - "The highest view number builder id should be the one that was just registered" - ); - - assert_eq!( - state.last_garbage_collected_view_num, - ViewNumber::new(9), - "The last garbage collected view number should match expected value" - ); - - assert!( - state.spawned_builder_states.contains_key(&BuilderStateId { - parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), - parent_view: ViewNumber::new(10), - }), - "The spawned builder states should contain the builder state id: {builder_state_id}" - ); - } - - /// This test checks that the remove_handles doesn't ensure that the - /// `last_garbage_collected_view_num` is strictly increasing. By first - /// removing a higher view number, followed by a smaller view number - /// (with the highest_view_num_builder_id having a view greater than or - /// equal to both targets) we can demonstrate this property. - /// - /// Furthermore this demonstrates that by supplying any view number to - /// remove_handles that is less than `last_garbage_collected_view_num` will - /// result in `last_garbage_collected_view_num` being updated to the given - /// value minus 1, without regard for it actually removing / cleaning - /// anything, or whether it is moving backwards in view numbers. - /// - /// If we were to account for the view numbers actually being cleaned up, - /// we could still trigger this behavior be re-adding the builder states - /// with a view number that precedes the last garbage collected view number, - /// then removing them would trigger the same behavior. - #[async_std::test] - async fn test_global_state_remove_handles_can_reduce_last_garbage_collected_view_num_simple() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); - let mut state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - // We register a few builder states. 
- for i in 1..=10 { - let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); - let view = ViewNumber::new(i as u64); - - state.register_builder_state( - BuilderStateId { - parent_commitment: vid_commit, - parent_view: view, - }, - ParentBlockReferences { - view_number: view, - vid_commitment: vid_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - async_broadcast::broadcast(10).0, - ); - } - - assert_eq!( - state.highest_view_num_builder_id, - BuilderStateId { - parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), - parent_view: ViewNumber::new(10), - }, - "The highest view number builder id should be the one that was just registered" - ); - - assert_eq!( - state.remove_handles(ViewNumber::new(10)), - ViewNumber::new(10), - "It should remove what has been stored" - ); - - assert_eq!( - state.last_garbage_collected_view_num, - ViewNumber::new(9), - "The last garbage collected view number should match expected value" - ); - - assert_eq!( - state.remove_handles(ViewNumber::new(5)), - ViewNumber::new(5), - "If we only remove up to view 5, then only entries preceding view 5 should be removed" - ); - - // The last garbage collected view has gone down as a result of our - // new remove_handles target, demonstrating that this number isn't - // strictly increasing in value. - assert_eq!( - state.last_garbage_collected_view_num, - ViewNumber::new(4), - "The last garbage collected view number should match expected value", - ); - } - - /// This test checks that the remove_handles doesn't ensure that the - /// `last_garbage_collected_view_num` is strictly increasing. It is very - /// similar to `test_global_state_remove_handles_can_reduce_last_garbage_collected_view_num_simple` - /// but differs in that it re-adds the removed builder states, just in case - /// the previous test's behavior is erroneous and fixed by ensuring that we - /// only consider removed view numbers. - #[async_std::test] - async fn test_global_state_remove_handles_can_reduce_last_garbage_collected_view_num_strict() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); - let mut state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - // We register a few builder states. 
- for i in 1..=10 { - let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); - let view = ViewNumber::new(i as u64); - - state.register_builder_state( - BuilderStateId { - parent_commitment: vid_commit, - parent_view: view, - }, - ParentBlockReferences { - view_number: view, - vid_commitment: vid_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - async_broadcast::broadcast(10).0, - ); - } - - assert_eq!( - state.highest_view_num_builder_id, - BuilderStateId { - parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), - parent_view: ViewNumber::new(10), - }, - "The highest view number builder id should be the one that was just registered" - ); - - assert_eq!( - state.remove_handles(ViewNumber::new(10)), - ViewNumber::new(10), - "It should remove what has been stored" - ); - - assert_eq!( - state.last_garbage_collected_view_num, - ViewNumber::new(9), - "The last garbage collected view number should match expected value" - ); - - // We re-add these removed builder_state_ids - for i in 1..10 { - let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); - let view = ViewNumber::new(i as u64); - - state.register_builder_state( - BuilderStateId { - parent_commitment: vid_commit, - parent_view: view, - }, - ParentBlockReferences { - view_number: view, - vid_commitment: vid_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - async_broadcast::broadcast(10).0, - ); - } - - assert_eq!( - state.remove_handles(ViewNumber::new(5)), - ViewNumber::new(5), - "If we only remove up to view 5, then only entries preceding view 5 should be removed" - ); - - // The last garbage collected view has gone down as a result of our - // new remove_handles target, demonstrating that this number isn't - // strictly increasing in value. - assert_eq!( - state.last_garbage_collected_view_num, - ViewNumber::new(4), - "The last garbage collected view number should match expected value", - ); - } - - /// This test checks that the remove_handles methods will correctly remove - /// The expected number of builder states from the spawned_builder_states - /// hashmap. It does this by specifically controlling the number of builder - /// states that are registered, and then removing a subset of them. It - /// verifies the absence of the entries that should have been removed, and - /// the presence of the entries that should have been kept. - #[async_std::test] - async fn test_global_state_remove_handles_expected() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); - let mut state = GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ); - - // We register a few builder states. 
- for i in 1..=10 { - let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); - let view = ViewNumber::new(i as u64); - - state.register_builder_state( - BuilderStateId { - parent_commitment: vid_commit, - parent_view: view, - }, - ParentBlockReferences { - view_number: view, - vid_commitment: vid_commit, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - async_broadcast::broadcast(10).0, - ); - } - - assert_eq!( - state.spawned_builder_states.len(), - 11, - "The spawned_builder_states should have 11 elements in it" - ); - - assert_eq!( - state.highest_view_num_builder_id, - BuilderStateId { - parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), - parent_view: ViewNumber::new(10), - }, - "The highest view number builder id should be the one that was just registered" - ); - - assert_eq!( - state.last_garbage_collected_view_num, - ViewNumber::new(0), - "The last garbage collected view number should be hat was passed in" - ); - - // Now we want to clean up some previous builder states to ensure that we - // remove the appropriate targets. - - // This should remove the view builder states preceding the view number 5 - assert_eq!( - state.remove_handles(ViewNumber::new(5)), - ViewNumber::new(5), - "The last garbage collected view number should match expected value" - ); - - // There should be 11 - 5 entries remaining - assert_eq!( - state.spawned_builder_states.len(), - 6, - "The spawned_builder_states should have 6 elements in it" - ); - - for i in 0..5 { - let builder_state_id = BuilderStateId { - parent_commitment: vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION), - parent_view: ViewNumber::new(i as u64), - }; - assert!( - !state.spawned_builder_states.contains_key(&builder_state_id), - "the spawned builder states should contain the builder state id, {builder_state_id}" - ); - } - - for i in 5..=10 { - let builder_state_id = BuilderStateId { - parent_commitment: vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION), - parent_view: ViewNumber::new(i as u64), - }; - assert!( - state.spawned_builder_states.contains_key(&builder_state_id), - "The spawned builder states should contain the builder state id: {builder_state_id}" - ); - } - } - - // Get Available Blocks Tests - - /// This test checks that the error `AvailableBlocksError::NoBlocksAvailable` - /// is returned when no blocks are available. - /// - /// To trigger this condition, we simply submit a request to the - /// implementation of get_available_blocks, and we do not provide any - /// information for the block view number requested. As a result, the - /// implementation will ultimately timeout, and return an error that - /// indicates that no blocks were available. 
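// A hedged sketch of the timeout path described above, with illustrative
// names (`NoBlocksAvailable` here stands in for
// `AvailableBlocksError::NoBlocksAvailable`): when no builder state ever
// responds, the wait is bounded by `async_std::future::timeout`, and an
// elapsed timer is mapped to the error.
use std::time::Duration;

async fn wait_for_blocks_sketch(
    mut response_rx: async_broadcast::Receiver<u64>,
    max_wait: Duration,
) -> Result<u64, &'static str> {
    match async_std::future::timeout(max_wait, response_rx.recv()).await {
        Ok(Ok(block)) => Ok(block),
        // Timer fired first, or the channel closed with nothing in it.
        _ => Err("NoBlocksAvailable"),
    }
}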
- #[async_std::test]
- async fn test_get_available_blocks_error_no_blocks_available() {
-     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
-     let (tx_sender, _) = async_broadcast::broadcast(10);
-     let (builder_public_key, builder_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
-     let (leader_public_key, leader_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
-     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
-
-     let state = ProxyGlobalState::<TestTypes>::new(
-         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
-             bootstrap_sender,
-             tx_sender,
-             parent_commit,
-             ViewNumber::new(0),
-             ViewNumber::new(0),
-             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
-             TEST_PROTOCOL_MAX_BLOCK_SIZE,
-             TEST_NUM_NODES_IN_VID_COMPUTATION,
-         ))),
-         (builder_public_key, builder_private_key),
-         Duration::from_millis(100),
-     );
-
-     // Sign with the leader's private key, as the implementation expects.
-     let signature = BLSPubKey::sign(&leader_private_key, parent_commit.as_ref()).unwrap();
-
-     // This *should* just time out
-     let result = state
-         .available_blocks_implementation(
-             &vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION),
-             1,
-             leader_public_key,
-             &signature,
-         )
-         .await;
-
-     match result {
-         Err(AvailableBlocksError::NoBlocksAvailable) => {
-             // This is what we expect.
-             // This message *should* indicate that no blocks were available.
-         }
-         Err(err) => {
-             panic!("Unexpected error: {:?}", err);
-         }
-         Ok(_) => {
-             panic!("Expected an error, but got a result");
-         }
-     }
- }
-
- /// This test checks that the error `AvailableBlocksError::SignatureValidationFailed`
- /// is returned when the signature is invalid.
- ///
- /// To trigger this condition, we simply submit a request to the
- /// implementation of get_available_blocks, but we sign the request with
- /// the builder's private key instead of the leader's private key. Since
- /// these keys do not match, this will result in a signature verification
- /// error.
- #[async_std::test]
- async fn test_get_available_blocks_error_invalid_signature() {
-     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
-     let (tx_sender, _) = async_broadcast::broadcast(10);
-     let (builder_public_key, builder_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
-     let (leader_public_key, _leader_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
-     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
-
-     let state = ProxyGlobalState::<TestTypes>::new(
-         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
-             bootstrap_sender,
-             tx_sender,
-             parent_commit,
-             ViewNumber::new(0),
-             ViewNumber::new(0),
-             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
-             TEST_PROTOCOL_MAX_BLOCK_SIZE,
-             TEST_NUM_NODES_IN_VID_COMPUTATION,
-         ))),
-         (builder_public_key, builder_private_key.clone()),
-         Duration::from_millis(100),
-     );
-
-     // Sign with the builder's private key instead of the leader's.
-     let signature = BLSPubKey::sign(&builder_private_key, parent_commit.as_ref()).unwrap();
-
-     // This should fail signature validation immediately rather than time out.
-     let result = state
-         .available_blocks_implementation(
-             &vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION),
-             1,
-             leader_public_key,
-             &signature,
-         )
-         .await;
-
-     match result {
-         Err(AvailableBlocksError::SignatureValidationFailed) => {
-             // This is what we expect.
-             // This message *should* indicate that the signature passed
-             // did not match the given public key.
-         }
-         Err(err) => {
-             panic!("Unexpected error: {:?}", err);
-         }
-         Ok(_) => {
-             panic!("Expected an error, but got a result");
-         }
-     }
- }
-
- /// This test checks that the error `AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided`
- /// is returned when the requested view number has already been garbage
- /// collected.
- ///
- /// To trigger this condition, we initialize the GlobalState with a
- /// garbage collected view number that is higher than the view that will
- /// be requested.
- #[async_std::test]
- async fn test_get_available_blocks_error_requesting_previous_view_number() {
-     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
-     let (tx_sender, _) = async_broadcast::broadcast(10);
-     let (builder_public_key, builder_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
-     let (leader_public_key, leader_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
-     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
-
-     let state = ProxyGlobalState::<TestTypes>::new(
-         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
-             bootstrap_sender,
-             tx_sender,
-             parent_commit,
-             ViewNumber::new(0),
-             ViewNumber::new(2),
-             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
-             TEST_PROTOCOL_MAX_BLOCK_SIZE,
-             TEST_NUM_NODES_IN_VID_COMPUTATION,
-         ))),
-         (builder_public_key, builder_private_key),
-         Duration::from_millis(100),
-     );
-
-     let signature = BLSPubKey::sign(&leader_private_key, parent_commit.as_ref()).unwrap();
-
-     // This should return immediately with an error, since the requested
-     // view (1) precedes the last garbage collected view (2).
-     let result = state
-         .available_blocks_implementation(
-             &vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION),
-             1,
-             leader_public_key,
-             &signature,
-         )
-         .await;
-
-     match result {
-         Err(AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided) => {
-             // This is what we expect.
-             // This message *should* indicate that the requested view has
-             // already been decided and garbage collected.
-         }
-         Err(err) => {
-             panic!("Unexpected error: {:?}", err);
-         }
-         Ok(_) => {
-             panic!("Expected an error, but got a result");
-         }
-     }
- }
-
- /// This test checks that the error `AvailableBlocksError::GetChannelForMatchingBuilderError`
- /// is returned when attempting to retrieve a view that is not stored within the state, and
- /// the highest view is also no longer stored within the state.
- ///
- /// To trigger this condition, we initialize the GlobalState with an initial
- /// state, and then we mutate the state to record the wrong latest state id.
- /// When interacting with `GlobalState` via `register_builder_state` and
- /// `remove_handles`, this error does not seem immediately reachable.
- #[async_std::test] - async fn test_get_available_blocks_error_get_channel_for_matching_builder() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let (builder_public_key, builder_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (leader_public_key, leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - - let state = Arc::new(ProxyGlobalState::::new( - Arc::new(RwLock::new(GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(4), - ViewNumber::new(4), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ))), - (builder_public_key, builder_private_key.clone()), - Duration::from_secs(1), - )); - - { - let mut write_locked_global_state = state.global_state.write_arc().await; - write_locked_global_state.highest_view_num_builder_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(5), - }; - } - - // As a result, we **should** be receiving a request for the available - // blocks with our expected state id on the receiver, along with a channel - // to send the response back to the caller. - - let signature = BLSPubKey::sign(&leader_private_key, parent_commit.as_ref()).unwrap(); - let result = state - .available_blocks_implementation(&parent_commit, 6, leader_public_key, &signature) - .await; - match result { - Err(AvailableBlocksError::GetChannelForMatchingBuilderError(_)) => { - // This is what we expect. - // This message *should* indicate that the response channel was closed. - } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - Ok(_) => { - panic!("Expected an error, but got a result"); - } - } - } - - // We have two error cases for `available_blocks_implementation` that we - // cannot seem trigger directly due to the nature of how the implementation - // performs. - // - // The first is ChannelUnexpectedlyClosed, which doesn't seem to be - // producible as the unbounded channel doesn't seem to be able to be - // closed. - // - // The second is SigningBlockFailed, which doesn't seem to be producible - // with a valid private key, and it's not clear how to create an invalid - // private key. - - /// This test checks that call to `available_blocks_implementation` returns - /// a successful response when the function is called before blocks are - /// made available. 
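// Sketch of the ordering the next test exercises, with illustrative names:
// the request parks on the channel first, and registering the data
// afterwards is still enough for the parked request to complete.
async fn request_before_available_sketch() {
    let (sender, mut receiver) = async_broadcast::broadcast::<u64>(10);
    // The "request" begins waiting before any data exists.
    let waiter = async_std::task::spawn(async move { receiver.recv().await });
    // Data becomes available only afterwards; the parked request completes.
    sender.broadcast(42).await.unwrap();
    assert_eq!(waiter.await.unwrap(), 42);
}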
- #[async_std::test] - async fn test_get_available_blocks_requested_before_blocks_available() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let (builder_public_key, builder_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (leader_public_key, leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - - let state = Arc::new(ProxyGlobalState::::new( - Arc::new(RwLock::new(GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ))), - (builder_public_key, builder_private_key.clone()), - Duration::from_secs(1), - )); - - let cloned_parent_commit = parent_commit; - let cloned_state = state.clone(); - let cloned_leader_private_key = leader_private_key.clone(); - - // We want to trigger a request for the available blocks, before we make the available block available - let get_available_blocks_handle = async_std::task::spawn(async move { - // leader_private_key - let signature = - BLSPubKey::sign(&cloned_leader_private_key, cloned_parent_commit.as_ref()).unwrap(); - cloned_state - .available_blocks_implementation( - &cloned_parent_commit, - 1, - leader_public_key, - &signature, - ) - .await - }); - - // Now we want to make the block data available to the state. - let expected_builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(1), - }; - - let mut response_receiver = { - // We only want to keep this write lock for the time needed, and - // no more. - let mut write_locked_global_state = state.global_state.write_arc().await; - - // We insert a sender so that the next time this stateId is requested, - // it will be available to send data back. - let (response_sender, response_receiver) = async_broadcast::broadcast(10); - write_locked_global_state.register_builder_state( - expected_builder_state_id.clone(), - ParentBlockReferences { - view_number: expected_builder_state_id.parent_view, - vid_commitment: expected_builder_state_id.parent_commitment, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - response_sender, - ); - - response_receiver - }; - - // As a result, we **should** be receiving a request for the available - // blocks with our expected state id on the receiver, along with a channel - // to send the response back to the caller. 
- - let response_channel = match response_receiver.next().await { - None => { - panic!("Expected a request for available blocks, but didn't get one"); - } - Some(MessageType::RequestMessage(req_msg)) => { - assert_eq!(req_msg.state_id, expected_builder_state_id); - req_msg.response_channel - } - Some(message) => { - panic!( - "Expected a request for available blocks, but got a different message: {:?}", - message - ); - } - }; - - // We want to send a ResponseMessage to the channel - let expected_response = ResponseMessage { - block_size: 9, - offered_fee: 7, - builder_hash: BuilderCommitment::from_bytes([1, 2, 3, 4, 5]), - }; - - assert!( - response_channel - .send(expected_response.clone()) - .await - .is_ok(), - "failed to send ResponseMessage" - ); - - let result = get_available_blocks_handle.await; - match result { - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - Ok(result) => { - assert_eq!( - result, - vec![AvailableBlockInfo { - block_hash: expected_response.builder_hash.clone(), - block_size: expected_response.block_size, - offered_fee: expected_response.offered_fee, - signature: ::sign_block_info( - &builder_private_key, - expected_response.block_size, - expected_response.offered_fee, - &expected_response.builder_hash, - ) - .unwrap(), - sender: builder_public_key, - _phantom: Default::default(), - }], - "get_available_blocks response matches expectation" - ); - } - } - } - - /// This test checks that call to `available_blocks_implementation` returns - /// a successful response when the function is called after blocks are - /// made available. - #[async_std::test] - async fn test_get_available_blocks_requested_after_blocks_available() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let (builder_public_key, builder_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (leader_public_key, leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - - let state = Arc::new(ProxyGlobalState::::new( - Arc::new(RwLock::new(GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ))), - (builder_public_key, builder_private_key.clone()), - Duration::from_secs(1), - )); - - let cloned_parent_commit = parent_commit; - let cloned_state = state.clone(); - let cloned_leader_private_key = leader_private_key.clone(); - - // Now we want to make the block data available to the state. - let expected_builder_state_id = BuilderStateId { - parent_commitment: parent_commit, - parent_view: ViewNumber::new(1), - }; - - let mut response_receiver = { - // We only want to keep this write lock for the time needed, and - // no more. - let mut write_locked_global_state = state.global_state.write_arc().await; - - // We insert a sender so that the next time this stateId is requested, - // it will be available to send data back. 
- let (response_sender, response_receiver) = async_broadcast::broadcast(10); - write_locked_global_state.register_builder_state( - expected_builder_state_id.clone(), - ParentBlockReferences { - view_number: expected_builder_state_id.parent_view, - vid_commitment: expected_builder_state_id.parent_commitment, - leaf_commit: Commitment::from_raw([0; 32]), - builder_commitment: BuilderCommitment::from_bytes([]), - }, - response_sender, - ); - - response_receiver - }; - - // We want to trigger a request for the available blocks, before we make the available block available - let get_available_blocks_handle = async_std::task::spawn(async move { - // leader_private_key - let signature = - BLSPubKey::sign(&cloned_leader_private_key, cloned_parent_commit.as_ref()).unwrap(); - cloned_state - .available_blocks_implementation( - &cloned_parent_commit, - 1, - leader_public_key, - &signature, - ) - .await - }); - - // As a result, we **should** be receiving a request for the available - // blocks with our expected state id on the receiver, along with a channel - // to send the response back to the caller. - - let response_channel = match response_receiver.next().await { - None => { - panic!("Expected a request for available blocks, but didn't get one"); - } - Some(MessageType::RequestMessage(req_msg)) => { - assert_eq!(req_msg.state_id, expected_builder_state_id); - req_msg.response_channel - } - Some(message) => { - panic!( - "Expected a request for available blocks, but got a different message: {:?}", - message - ); - } - }; - - // We want to send a ResponseMessage to the channel - let expected_response = ResponseMessage { - block_size: 9, - offered_fee: 7, - builder_hash: BuilderCommitment::from_bytes([1, 2, 3, 4, 5]), - }; - - assert!( - response_channel - .send(expected_response.clone()) - .await - .is_ok(), - "failed to send ResponseMessage" - ); - - let result = get_available_blocks_handle.await; - match result { - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - Ok(result) => { - assert_eq!( - result, - vec![AvailableBlockInfo { - block_hash: expected_response.builder_hash.clone(), - block_size: expected_response.block_size, - offered_fee: expected_response.offered_fee, - signature: ::sign_block_info( - &builder_private_key, - expected_response.block_size, - expected_response.offered_fee, - &expected_response.builder_hash, - ) - .unwrap(), - sender: builder_public_key, - _phantom: Default::default(), - }], - "get_available_blocks response matches expectation" - ); - } - } - } - - // Claim Block Tests - - /// This test checks that the error `ClaimBlockError::SignatureValidationFailed` - /// is returned when the signature is invalid. - /// - /// To trigger this condition, we simply submit a request to the - /// implementation of claim_block, but we sign the request with - /// the builder's private key instead of the leader's private key. Since - /// these keys do not match, this will result in a signature verification - /// error. 
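// Sketch of the gate described above, assuming HotShot's `SignatureKey`
// trait keeps its usual `validate(&self, signature, data) -> bool` shape;
// the request is rejected before any block lookup when the sender did not
// produce the signature over the claimed commitment. Names are
// illustrative, not the service's actual helpers.
use hotshot_types::traits::signature_key::SignatureKey;

fn validate_claim_request<K: SignatureKey>(
    sender: &K,
    signature: &K::PureAssembledSignatureType,
    commitment_bytes: &[u8],
) -> Result<(), &'static str> {
    if sender.validate(signature, commitment_bytes) {
        Ok(())
    } else {
        // Surfaces as `ClaimBlockError::SignatureValidationFailed`.
        Err("SignatureValidationFailed")
    }
}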
- #[async_std::test]
- async fn test_claim_block_error_signature_validation_failed() {
-     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
-     let (tx_sender, _) = async_broadcast::broadcast(10);
-     let (builder_public_key, builder_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
-     let (leader_public_key, _leader_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
-     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
-
-     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
-         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
-             bootstrap_sender,
-             tx_sender,
-             parent_commit,
-             ViewNumber::new(0),
-             ViewNumber::new(0),
-             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
-             TEST_PROTOCOL_MAX_BLOCK_SIZE,
-             TEST_NUM_NODES_IN_VID_COMPUTATION,
-         ))),
-         (builder_public_key, builder_private_key.clone()),
-         Duration::from_secs(1),
-     ));
-
-     let commitment = BuilderCommitment::from_bytes([0; 256]);
-
-     // Sign with the builder's private key instead of the leader's.
-     let signature = BLSPubKey::sign(&builder_private_key, commitment.as_ref()).unwrap();
-     let result = state
-         .claim_block_implementation(&commitment, 1, leader_public_key, &signature)
-         .await;
-
-     match result {
-         Err(ClaimBlockError::SignatureValidationFailed) => {
-             // This is what we expect.
-             // This message *should* indicate that the signature passed
-             // did not match the given public key.
-         }
-         Err(err) => {
-             panic!("Unexpected error: {:?}", err);
-         }
-         Ok(_) => {
-             panic!("Expected an error, but got a result");
-         }
-     }
- }
-
- /// This test checks that the error `ClaimBlockError::BlockDataNotFound`
- /// is returned when the block data is not found.
- ///
- /// To trigger this condition, we simply submit a request to the
- /// implementation of claim_block, but we do not provide any information
- /// for the block data requested. As a result, the implementation will
- /// ultimately time out, and return an error that indicates that the block
- /// data was not found.
- #[async_std::test]
- async fn test_claim_block_error_block_data_not_found() {
-     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
-     let (tx_sender, _) = async_broadcast::broadcast(10);
-     let (builder_public_key, builder_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
-     let (leader_public_key, leader_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
-     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
-
-     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
-         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
-             bootstrap_sender,
-             tx_sender,
-             parent_commit,
-             ViewNumber::new(0),
-             ViewNumber::new(0),
-             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
-             TEST_PROTOCOL_MAX_BLOCK_SIZE,
-             TEST_NUM_NODES_IN_VID_COMPUTATION,
-         ))),
-         (builder_public_key, builder_private_key.clone()),
-         Duration::from_secs(1),
-     ));
-
-     let commitment = BuilderCommitment::from_bytes([0; 256]);
-
-     let signature = BLSPubKey::sign(&leader_private_key, commitment.as_ref()).unwrap();
-     let result = state
-         .claim_block_implementation(&commitment, 1, leader_public_key, &signature)
-         .await;
-
-     match result {
-         Err(ClaimBlockError::BlockDataNotFound) => {
-             // This is what we expect.
-             // This message *should* indicate that the requested block data
-             // could not be found.
-         }
-         Err(err) => {
-             panic!("Unexpected error: {:?}", err);
-         }
-         Ok(_) => {
-             panic!("Expected an error, but got a result");
-         }
-     }
- }
-
- /// This test checks that the function completes successfully.
- #[async_std::test] - async fn test_claim_block_success() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let (builder_public_key, builder_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (leader_public_key, leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - - let state = Arc::new(ProxyGlobalState::::new( - Arc::new(RwLock::new(GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ))), - (builder_public_key, builder_private_key.clone()), - Duration::from_secs(1), - )); - - let commitment = BuilderCommitment::from_bytes([0; 256]); - let cloned_commitment = commitment.clone(); - let cloned_state = state.clone(); - - let vid_trigger_receiver = { - let mut global_state_write_lock = state.global_state.write_arc().await; - let block_id = BlockId { - hash: commitment, - view: ViewNumber::new(1), - }; - - let payload = TestBlockPayload { - transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])], - }; - - let (vid_trigger_sender, vid_trigger_receiver) = - async_compatibility_layer::channel::oneshot(); - let (_, vid_receiver) = unbounded(); - - global_state_write_lock.blocks.put( - block_id, - BlockInfo { - block_payload: payload, - metadata: TestMetadata { - num_transactions: 1, - }, - vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))), - vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait( - vid_receiver, - ))), - offered_fee: 100, - truncated: false, - }, - ); - - vid_trigger_receiver - }; - - let claim_block_join_handle = async_std::task::spawn(async move { - let signature = - BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap(); - cloned_state - .claim_block_implementation(&cloned_commitment, 1, leader_public_key, &signature) - .await - }); - - // This should be the started event - match vid_trigger_receiver.recv().await { - Ok(TriggerStatus::Start) => { - // This is what we expect. - } - _ => { - panic!("Expected a TriggerStatus::Start event"); - } - } - - let result = claim_block_join_handle.await; - - match result { - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - Ok(_) => { - // This is expected - } - } - } - - // Claim Block Header Input Tests - - /// This test checks that the error `ClaimBlockHeaderInputError::SignatureValidationFailed` - /// is returned when the signature is invalid. - /// - /// To trigger this condition, we simply submit a request to the - /// implementation of claim_block, but we sign the request with - /// the builder's private key instead of the leader's private key. Since - /// these keys do not match, this will result in a signature verification - /// error. 
- #[async_std::test] - async fn test_claim_block_header_input_error_signature_verification_failed() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let (builder_public_key, builder_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (leader_public_key, _leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - - let state = Arc::new(ProxyGlobalState::::new( - Arc::new(RwLock::new(GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ))), - (builder_public_key, builder_private_key.clone()), - Duration::from_secs(1), - )); - - let commitment = BuilderCommitment::from_bytes([0; 256]); - - let signature = BLSPubKey::sign(&builder_private_key, commitment.as_ref()).unwrap(); - - let result = state - .claim_block_header_input_implementation(&commitment, 1, leader_public_key, &signature) - .await; - - match result { - Err(ClaimBlockHeaderInputError::SignatureValidationFailed) => { - // This is what we expect. - // This message *should* indicate that the signature passed - // did not match the given public key. - } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - Ok(_) => { - panic!("Expected an error, but got a result"); - } - } - } - - /// This test checks that the error `ClaimBlockHeaderInputError::BlockHeaderNotFound` - /// is returned when the block header is not found. - /// - /// To trigger this condition, we simply submit a request to the - /// implementation of claim_block, but we do not provide any information - /// for the block header requested. As a result, the implementation will - /// ultimately timeout, and return an error that indicates that the block - /// header was not found. - #[async_std::test] - async fn test_claim_block_header_input_error_block_header_not_found() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let (builder_public_key, builder_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (leader_public_key, leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - - let state = Arc::new(ProxyGlobalState::::new( - Arc::new(RwLock::new(GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ))), - (builder_public_key, builder_private_key.clone()), - Duration::from_secs(1), - )); - - let commitment = BuilderCommitment::from_bytes([0; 256]); - - let signature = BLSPubKey::sign(&leader_private_key, commitment.as_ref()).unwrap(); - - let result = state - .claim_block_header_input_implementation(&commitment, 1, leader_public_key, &signature) - .await; - - match result { - Err(ClaimBlockHeaderInputError::BlockHeaderNotFound) => { - // This is what we expect. - // This message *should* indicate that the signature passed - // did not match the given public key. 
-         }
-         Err(err) => {
-             panic!("Unexpected error: {:?}", err);
-         }
-         Ok(_) => {
-             panic!("Expected an error, but got a result");
-         }
-     }
- }
-
- /// This test checks that the error `ClaimBlockHeaderInputError::CouldNotGetVidInTime`
- /// is returned when the VID is not received in time.
- ///
- /// To trigger this condition, we simply submit a request to the
- /// implementation of claim_block, but we do not provide a VID. As a result,
- /// the implementation will ultimately time out, and return an error that
- /// indicates that the VID was not received in time.
- ///
- /// At least that's what it should do. At the moment, this results in a
- /// deadlock due to attempting to acquire the `write_arc` twice.
- #[async_std::test]
- async fn test_claim_block_header_input_error_could_not_get_vid_in_time() {
-     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
-     let (tx_sender, _) = async_broadcast::broadcast(10);
-     let (builder_public_key, builder_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
-     let (leader_public_key, leader_private_key) =
-         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
-     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
-
-     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
-         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
-             bootstrap_sender,
-             tx_sender,
-             parent_commit,
-             ViewNumber::new(0),
-             ViewNumber::new(0),
-             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
-             TEST_PROTOCOL_MAX_BLOCK_SIZE,
-             TEST_NUM_NODES_IN_VID_COMPUTATION,
-         ))),
-         (builder_public_key, builder_private_key.clone()),
-         Duration::from_secs(1),
-     ));
-
-     let commitment = BuilderCommitment::from_bytes([0; 256]);
-     let cloned_commitment = commitment.clone();
-     let cloned_state = state.clone();
-
-     // Keep the sender alive (but never send) so the receiver waits until
-     // the timeout elapses.
-     let _vid_sender = {
-         let mut global_state_write_lock = state.global_state.write_arc().await;
-         let block_id = BlockId {
-             hash: commitment,
-             view: ViewNumber::new(1),
-         };
-
-         let payload = TestBlockPayload {
-             transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])],
-         };
-
-         let (vid_trigger_sender, _) = async_compatibility_layer::channel::oneshot();
-         let (vid_sender, vid_receiver) = unbounded();
-
-         global_state_write_lock.blocks.put(
-             block_id,
-             BlockInfo {
-                 block_payload: payload,
-                 metadata: TestMetadata {
-                     num_transactions: 1,
-                 },
-                 vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))),
-                 vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait(
-                     vid_receiver,
-                 ))),
-                 offered_fee: 100,
-                 truncated: false,
-             },
-         );
-
-         vid_sender
-     };
-
-     let claim_block_header_input_join_handle = async_std::task::spawn(async move {
-         let signature =
-             BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap();
-         cloned_state
-             .claim_block_header_input_implementation(
-                 &cloned_commitment,
-                 1,
-                 leader_public_key,
-                 &signature,
-             )
-             .await
-     });
-
-     let result = claim_block_header_input_join_handle.await;
-
-     match result {
-         Err(ClaimBlockHeaderInputError::CouldNotGetVidInTime) => {
-             // This is what we expect.
-             // This message *should* indicate that the VID could not be
-             // retrieved before the timeout elapsed.
-         }
-         Err(err) => {
-             panic!("Unexpected error: {:?}", err);
-         }
-         Ok(_) => {
-             panic!("Expected an error, but got a result");
-         }
-     }
- }
-
- /// This test checks that the error `ClaimBlockHeaderInputError::WaitAndKeepGetError`
- /// is returned when the VID sender has been dropped.
- /// - /// To trigger this condition, we simply submit a request to the - /// implementation of claim_block, but we close the VID receiver channel's - /// sender. - #[async_std::test] - async fn test_claim_block_header_input_error_keep_and_wait_get_error() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let (builder_public_key, builder_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (leader_public_key, leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - - let state = Arc::new(ProxyGlobalState::::new( - Arc::new(RwLock::new(GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ))), - (builder_public_key, builder_private_key.clone()), - Duration::from_secs(1), - )); - - let commitment = BuilderCommitment::from_bytes([0; 256]); - let cloned_commitment = commitment.clone(); - let cloned_state = state.clone(); - - { - let mut global_state_write_lock = state.global_state.write_arc().await; - let block_id = BlockId { - hash: commitment, - view: ViewNumber::new(1), - }; - - let payload = TestBlockPayload { - transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])], - }; - - let (vid_trigger_sender, _) = async_compatibility_layer::channel::oneshot(); - let (_, vid_receiver) = unbounded(); - - global_state_write_lock.blocks.put( - block_id, - BlockInfo { - block_payload: payload, - metadata: TestMetadata { - num_transactions: 1, - }, - vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))), - vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait( - vid_receiver, - ))), - offered_fee: 100, - truncated: false, - }, - ); - }; - - let claim_block_header_input_join_handle = async_std::task::spawn(async move { - let signature = - BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap(); - cloned_state - .claim_block_header_input_implementation( - &cloned_commitment, - 1, - leader_public_key, - &signature, - ) - .await - }); - - let result = claim_block_header_input_join_handle.await; - - match result { - Err(ClaimBlockHeaderInputError::WaitAndKeepGetError(_)) => { - // This is what we expect. - // This message *should* indicate that the signature passed - // did not match the given public key. - } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - Ok(_) => { - panic!("Expected an error, but got a result"); - } - } - } - - /// This test checks that successful response is returned when the VID is - /// received in time. 
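// A simplified sketch of the wait-then-cache cell the tests above poke at;
// the real `crate::WaitAndKeep` may differ, so treat the shape below as an
// assumption: `Wait` holds the receiver until a value arrives, `Keep`
// caches it for later calls, and a dropped sender surfaces as an error
// (the `WaitAndKeepGetError` case).
use async_std::channel::Receiver;

enum WaitAndKeepSketch<T: Clone> {
    Wait(Receiver<T>),
    Keep(T),
}

impl<T: Clone> WaitAndKeepSketch<T> {
    async fn get(&mut self) -> Result<T, &'static str> {
        if let WaitAndKeepSketch::Wait(receiver) = self {
            // A dropped sender makes `recv` fail; otherwise cache the value.
            let value = receiver.recv().await.map_err(|_| "WaitAndKeepGetError")?;
            *self = WaitAndKeepSketch::Keep(value);
        }
        match self {
            WaitAndKeepSketch::Keep(value) => Ok(value.clone()),
            WaitAndKeepSketch::Wait(_) => unreachable!("just transitioned to Keep"),
        }
    }
}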
- #[async_std::test] - async fn test_claim_block_header_input_success() { - let (bootstrap_sender, _) = async_broadcast::broadcast(10); - let (tx_sender, _) = async_broadcast::broadcast(10); - let (builder_public_key, builder_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (leader_public_key, leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); - - let state = Arc::new(ProxyGlobalState::::new( - Arc::new(RwLock::new(GlobalState::::new( - bootstrap_sender, - tx_sender, - parent_commit, - ViewNumber::new(0), - ViewNumber::new(0), - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_NUM_NODES_IN_VID_COMPUTATION, - ))), - (builder_public_key, builder_private_key.clone()), - Duration::from_secs(1), - )); - - let commitment = BuilderCommitment::from_bytes([0; 256]); - let cloned_commitment = commitment.clone(); - let cloned_state = state.clone(); - - let vid_sender = { - let mut global_state_write_lock = state.global_state.write_arc().await; - let block_id = BlockId { - hash: commitment, - view: ViewNumber::new(1), - }; - - let payload = TestBlockPayload { - transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])], - }; - - let (vid_trigger_sender, _) = async_compatibility_layer::channel::oneshot(); - let (vid_sender, vid_receiver) = unbounded(); - - global_state_write_lock.blocks.put( - block_id, - BlockInfo { - block_payload: payload, - metadata: TestMetadata { - num_transactions: 1, - }, - vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))), - vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait( - vid_receiver, - ))), - offered_fee: 100, - truncated: false, - }, - ); - - vid_sender - }; - - let claim_block_header_input_join_handle = async_std::task::spawn(async move { - let signature = - BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap(); - cloned_state - .claim_block_header_input_implementation( - &cloned_commitment, - 1, - leader_public_key, - &signature, - ) - .await - }); - - vid_sender - .send(precompute_vid_commitment(&[1, 2, 3, 4], 2)) - .await - .unwrap(); - - let result = claim_block_header_input_join_handle.await; - - match result { - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - Ok(_) => { - // This is expected. - } - } - } - - // handle_da_event Tests - - /// This test checks that the error [HandleDaEventError::SignatureValidationFailed] - /// is returned under the right conditions of invoking - /// [handle_da_event_implementation]. - /// - /// To trigger this error, we simply need to ensure that signature provided - /// to the [Proposal] does not match the public key of the sender. - /// Additionally, the public keys passed for both the leader and the sender - /// need to match each other. 
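// Sketch of the check described above, assuming the `sha2` crate and the
// usual `SignatureKey::validate` shape: the proposal signature must verify
// over the Sha256 digest of the encoded transactions, under the public key
// of the event's sender.
use hotshot_types::traits::signature_key::SignatureKey;
use sha2::{Digest, Sha256};

fn validate_da_proposal<K: SignatureKey>(
    sender: &K,
    signature: &K::PureAssembledSignatureType,
    encoded_transactions: &[u8],
) -> bool {
    let encoded_txns_hash = Sha256::digest(encoded_transactions);
    // `false` surfaces as `HandleDaEventError::SignatureValidationFailed`.
    sender.validate(signature, &encoded_txns_hash)
}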
- #[async_std::test] - async fn test_handle_da_event_implementation_error_signature_validation_failed() { - let (sender_public_key, _) = - ::generated_from_seed_indexed([0; 32], 0); - let (_, leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let (da_channel_sender, _) = async_broadcast::broadcast(10); - let view_number = ViewNumber::new(10); - - let da_proposal = DaProposal:: { - encoded_transactions: Arc::new([1, 2, 3, 4, 5, 6]), - metadata: TestMetadata { - num_transactions: 1, - }, // arbitrary - view_number, - }; - - let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions); - let signature = - ::sign(&leader_private_key, &encoded_txns_hash).unwrap(); - - let signed_da_proposal = Arc::new(Proposal { - data: da_proposal, - signature, - _pd: Default::default(), - }); - - let result = handle_da_event_implementation( - &da_channel_sender, - signed_da_proposal.clone(), - sender_public_key, - ) - .await; - - match result { - Err(HandleDaEventError::SignatureValidationFailed) => { - // This is expected. - } - Ok(_) => { - panic!("expected an error, but received a successful attempt instead") - } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - } - } - - /// This test checks that the error [HandleDaEventError::BroadcastFailed] - /// is returned under the right conditions of invoking - /// [handle_da_event_implementation]. - /// - /// To trigger this error, we simply need to ensure that the broadcast - /// channel receiver has been closed / dropped before the attempt to - /// send on the broadcast sender is performed. - #[async_std::test] - async fn test_handle_da_event_implementation_error_broadcast_failed() { - let (sender_public_key, sender_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let da_channel_sender = { - let (da_channel_sender, _) = async_broadcast::broadcast(10); - da_channel_sender - }; - - let view_number = ViewNumber::new(10); - - let da_proposal = DaProposal:: { - encoded_transactions: Arc::new([1, 2, 3, 4, 5, 6]), - metadata: TestMetadata { - num_transactions: 1, - }, // arbitrary - view_number, - }; - - let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions); - let signature = - ::sign(&sender_private_key, &encoded_txns_hash).unwrap(); - - let signed_da_proposal = Arc::new(Proposal { - data: da_proposal, - signature, - _pd: Default::default(), - }); - - let result = handle_da_event_implementation( - &da_channel_sender, - signed_da_proposal.clone(), - sender_public_key, - ) - .await; - - match result { - Err(HandleDaEventError::BroadcastFailed(_)) => { - // This error is expected - } - Ok(_) => { - panic!("Expected an error, but got a result"); - } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - } - } - - /// This test checks the expected successful behavior of the - /// [handle_da_event_implementation] function. 
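// A hedged sketch, assuming the `async_broadcast` crate, of the two channel
// behaviors the surrounding tests rely on: a live receiver sees the exact
// message that was broadcast (the success path), while a channel whose
// receivers have all been dropped is closed and rejects further sends (the
// `BroadcastFailed` path).
async fn broadcast_behavior_sketch() {
    let (sender, mut receiver) = async_broadcast::broadcast::<&str>(10);

    // Success path: the receiver observes the broadcast message.
    sender.broadcast("proposal").await.unwrap();
    assert_eq!(receiver.recv().await.unwrap(), "proposal");

    // Failure path: dropping the last receiver closes the channel.
    drop(receiver);
    assert!(sender.broadcast("proposal").await.is_err());
}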
- #[async_std::test] - async fn test_handle_da_event_implementation_success() { - let (sender_public_key, sender_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (da_channel_sender, da_channel_receiver) = async_broadcast::broadcast(10); - let view_number = ViewNumber::new(10); - - let da_proposal = DaProposal:: { - encoded_transactions: Arc::new([1, 2, 3, 4, 5, 6]), - metadata: TestMetadata { - num_transactions: 1, - }, // arbitrary - view_number, - }; - - let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions); - let signature = - ::sign(&sender_private_key, &encoded_txns_hash).unwrap(); - - let signed_da_proposal = Arc::new(Proposal { - data: da_proposal, - signature, - _pd: Default::default(), - }); - - let result = handle_da_event_implementation( - &da_channel_sender, - signed_da_proposal.clone(), - sender_public_key, - ) - .await; - - match result { - Ok(_) => { - // This is expected. - } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - } - - let mut da_channel_receiver = da_channel_receiver; - match da_channel_receiver.next().await { - Some(MessageType::DaProposalMessage(da_proposal_message)) => { - assert_eq!(da_proposal_message.proposal, signed_da_proposal); - } - _ => { - panic!("Expected a DaProposalMessage, but got something else"); - } - } - } - - // handle_quorum_event Tests - - /// This test checks that the error [HandleQuorumEventError::SignatureValidationFailed] - /// is returned under the right conditions of invoking - /// [handle_quorum_event_implementation]. - /// - /// To trigger this error, we simply need to ensure that the signature - /// provided to the [Proposal] does not match the public key of the sender. - /// - /// Additionally, the public keys passed for both the leader and the sender - /// need to match each other. - #[async_std::test] - async fn test_handle_quorum_event_error_signature_validation_failed() { - let (sender_public_key, _) = - ::generated_from_seed_indexed([0; 32], 0); - let (_, leader_private_key) = - ::generated_from_seed_indexed([0; 32], 1); - let (quorum_channel_sender, _) = async_broadcast::broadcast(10); - let view_number = ViewNumber::new(10); - - let quorum_proposal = { - let leaf = Leaf::::genesis( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await; - - QuorumProposal:: { - block_header: leaf.block_header().clone(), - view_number, - justify_qc: QuorumCertificate::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await, - upgrade_certificate: None, - proposal_certificate: None, - } - }; - - let leaf = Leaf::from_quorum_proposal(&quorum_proposal); - - let signature = - ::sign(&leader_private_key, leaf.legacy_commit().as_ref()) - .unwrap(); - - let signed_quorum_proposal = Arc::new(Proposal { - data: quorum_proposal, - signature, - _pd: Default::default(), - }); - - let result = handle_quorum_event_implementation( - &quorum_channel_sender, - signed_quorum_proposal.clone(), - sender_public_key, - ) - .await; - - match result { - Err(HandleQuorumEventError::SignatureValidationFailed) => { - // This is expected. - } - Ok(_) => { - panic!("expected an error, but received a successful attempt instead"); - } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - } - } - - /// This test checks that the error [HandleQuorumEventError::BroadcastFailed] - /// is returned under the right conditions of invoking - /// [handle_quorum_event_implementation]. 
- /// - /// To trigger this error, we simply need to ensure that the broadcast - /// channel receiver has been closed / dropped before the attempt to - /// send on the broadcast sender is performed. - #[async_std::test] - async fn test_handle_quorum_event_error_broadcast_failed() { - let (sender_public_key, sender_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let quorum_channel_sender = { - let (quorum_channel_sender, _) = async_broadcast::broadcast(10); - quorum_channel_sender - }; - - let view_number = ViewNumber::new(10); - - let quorum_proposal = { - let leaf = Leaf::::genesis( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await; - - QuorumProposal:: { - block_header: leaf.block_header().clone(), - view_number, - justify_qc: QuorumCertificate::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await, - upgrade_certificate: None, - proposal_certificate: None, - } - }; - - let leaf = Leaf::from_quorum_proposal(&quorum_proposal); - - let signature = - ::sign(&sender_private_key, leaf.legacy_commit().as_ref()) - .unwrap(); - - let signed_quorum_proposal = Arc::new(Proposal { - data: quorum_proposal, - signature, - _pd: Default::default(), - }); - - let result = handle_quorum_event_implementation( - &quorum_channel_sender, - signed_quorum_proposal.clone(), - sender_public_key, - ) - .await; - - match result { - Err(HandleQuorumEventError::BroadcastFailed(_)) => { - // This is expected. - } - Ok(_) => { - panic!("Expected an error, but got a result"); - } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - } - } - - /// This test checks to ensure that [handle_quorum_event_implementation] - /// completes successfully as expected when the correct conditions are met. - #[async_std::test] - async fn test_handle_quorum_event_success() { - let (sender_public_key, sender_private_key) = - ::generated_from_seed_indexed([0; 32], 0); - let (quorum_channel_sender, quorum_channel_receiver) = async_broadcast::broadcast(10); - let view_number = ViewNumber::new(10); - - let quorum_proposal = { - let leaf = Leaf::::genesis( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await; - - QuorumProposal:: { - block_header: leaf.block_header().clone(), - view_number, - justify_qc: QuorumCertificate::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await, - upgrade_certificate: None, - proposal_certificate: None, - } - }; - - let leaf = Leaf::from_quorum_proposal(&quorum_proposal); - - let signature = - ::sign(&sender_private_key, leaf.legacy_commit().as_ref()) - .unwrap(); - - let signed_quorum_proposal = Arc::new(Proposal { - data: quorum_proposal, - signature, - _pd: Default::default(), - }); - - let result = handle_quorum_event_implementation( - &quorum_channel_sender, - signed_quorum_proposal.clone(), - sender_public_key, - ) - .await; - - match result { - Ok(_) => { - // This is expected. - } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - } - - let mut quorum_channel_receiver = quorum_channel_receiver; - match quorum_channel_receiver.next().await { - Some(MessageType::QuorumProposalMessage(da_proposal_message)) => { - assert_eq!(da_proposal_message.proposal, signed_quorum_proposal); - } - _ => { - panic!("Expected a QuorumProposalMessage, but got something else"); - } - } - } - - // HandleReceivedTxns Tests - - /// This test checks that the error [HandleReceivedTxnsError::TooManyTransactions] - /// is returned when the conditions are met. 
- /// - /// To trigger this error we simply provide a broadcast channel with a - /// buffer smaller than the number of transactions we are attempting to - /// send through it. - #[async_std::test] - async fn test_handle_received_txns_error_too_many_transactions() { - let (tx_sender, tx_receiver) = async_broadcast::broadcast(2); - let num_transactions = 5; - let mut txns = Vec::with_capacity(num_transactions); - for index in 0..num_transactions { - txns.push(TestTransaction::new(vec![index as u8])); - } - let txns = txns; - - { - let mut handle_received_txns_iter = HandleReceivedTxns::::new( - tx_sender, - txns.clone(), - TransactionSource::HotShot, - TEST_MAX_TX_LEN, - ); - - assert!(handle_received_txns_iter.next().is_some()); - assert!(handle_received_txns_iter.next().is_some()); - match handle_received_txns_iter.next() { - Some(Err(HandleReceivedTxnsError::TooManyTransactions)) => { - // This is expected, - } - Some(Err(err)) => { - panic!("Unexpected error: {:?}", err); - } - Some(Ok(_)) => { - panic!("Expected an error, but got a result"); - } - None => { - panic!("Expected an error, but got a result"); - } - } - } - - let mut tx_receiver = tx_receiver; - assert!(tx_receiver.next().await.is_some()); - assert!(tx_receiver.next().await.is_some()); - assert!(tx_receiver.next().await.is_none()); - } - - /// This test checks that the error [HandleReceivedTxnsError::TransactionTooBig] - /// when the conditions are met. - /// - /// To trigger this error we simply provide a [TestTransaction] whose size - /// exceeds the maximum transaction length. we pass to [HandleReceivedTxns]. - #[async_std::test] - async fn test_handle_received_txns_error_transaction_too_big() { - let (tx_sender, tx_receiver) = async_broadcast::broadcast(10); - let num_transactions = 2; - let mut txns = Vec::with_capacity(num_transactions + 1); - for index in 0..num_transactions { - txns.push(TestTransaction::new(vec![index as u8])); - } - txns.push(TestTransaction::new(vec![0; 256])); - let txns = txns; - - { - let mut handle_received_txns_iter = HandleReceivedTxns::::new( - tx_sender, - txns.clone(), - TransactionSource::HotShot, - TEST_MAX_TX_LEN, - ); - - assert!(handle_received_txns_iter.next().is_some()); - assert!(handle_received_txns_iter.next().is_some()); - match handle_received_txns_iter.next() { - Some(Err(HandleReceivedTxnsError::TransactionTooBig { - estimated_length, - max_txn_len, - })) => { - // This is expected, - assert!(estimated_length >= 256); - assert_eq!(max_txn_len, TEST_MAX_TX_LEN); - } - Some(Err(err)) => { - panic!("Unexpected error: {:?}", err); - } - Some(Ok(_)) => { - panic!("Expected an error, but got a result"); - } - None => { - panic!("Expected an error, but got a result"); - } - } - } - - let mut tx_receiver = tx_receiver; - assert!(tx_receiver.next().await.is_some()); - assert!(tx_receiver.next().await.is_some()); - assert!(tx_receiver.next().await.is_none()); - } - - /// This test checks that the error [HandleReceivedTxnsError::Internal] - /// is returned when the broadcast channel is closed. - /// - /// To trigger this error we simply close the broadcast channel receiver - /// before attempting to send any transactions through the broadcast channel - /// sender. 
- #[async_std::test] - async fn test_handle_received_txns_error_internal() { - let tx_sender = { - let (tx_sender, _) = async_broadcast::broadcast(10); - tx_sender - }; - - let num_transactions = 10; - let mut txns = Vec::with_capacity(num_transactions); - for index in 0..num_transactions { - txns.push(TestTransaction::new(vec![index as u8])); - } - txns.push(TestTransaction::new(vec![0; 256])); - let txns = txns; - - { - let mut handle_received_txns_iter = HandleReceivedTxns::::new( - tx_sender, - txns.clone(), - TransactionSource::HotShot, - TEST_MAX_TX_LEN, - ); - - match handle_received_txns_iter.next() { - Some(Err(HandleReceivedTxnsError::Internal(err))) => { - // This is expected, - - match err { - async_broadcast::TrySendError::Closed(_) => { - // This is expected. - } - _ => { - panic!("Unexpected error: {:?}", err); - } - } - } - Some(Err(err)) => { - panic!("Unexpected error: {:?}", err); - } - Some(Ok(_)) => { - panic!("Expected an error, but got a result"); - } - None => { - panic!("Expected an error, but got a result"); - } - } - } - } - - /// This test checks that [HandleReceivedTxns] processes completely without - /// issue when the conditions are correct for it to do so. - #[async_std::test] - async fn test_handle_received_txns_success() { - let (tx_sender, tx_receiver) = async_broadcast::broadcast(10); - let num_transactions = 10; - let mut txns = Vec::with_capacity(num_transactions); - for index in 0..num_transactions { - txns.push(TestTransaction::new(vec![index as u8])); - } - let txns = txns; - - let handle_received_txns_iter = HandleReceivedTxns::::new( - tx_sender, - txns.clone(), - TransactionSource::HotShot, - TEST_MAX_TX_LEN, - ); - - for iteration in handle_received_txns_iter { - match iteration { - Ok(_) => { - // This is expected. 
- } - Err(err) => { - panic!("Unexpected error: {:?}", err); - } - } - } - - let mut tx_receiver = tx_receiver; - for tx in txns { - match tx_receiver.next().await { - Some(received_txn) => { - assert_eq!(received_txn.tx, tx); - } - _ => { - panic!("Expected a TransactionMessage, but got something else"); - } - } - } - } - - #[test] - fn test_increment_block_size() { - let mut block_size_limits = - BlockSizeLimits::new(TEST_PROTOCOL_MAX_BLOCK_SIZE, Duration::from_millis(25)); - // Simulate decreased limits - block_size_limits.max_block_size = TEST_PROTOCOL_MAX_BLOCK_SIZE / 2; - - // Shouldn't increment, increment period hasn't passed yet - block_size_limits.try_increment_block_size(false); - assert!(block_size_limits.max_block_size == TEST_PROTOCOL_MAX_BLOCK_SIZE / 2); - - // Should increment, increment period hasn't passed yet, but force flag is set - block_size_limits.try_increment_block_size(true); - assert!(block_size_limits.max_block_size > TEST_PROTOCOL_MAX_BLOCK_SIZE / 2); - let new_size = block_size_limits.max_block_size; - - std::thread::sleep(Duration::from_millis(30)); - - // Should increment, increment period has passed - block_size_limits.try_increment_block_size(false); - assert!(block_size_limits.max_block_size > new_size); - } - - #[test] - fn test_decrement_block_size() { - let mut block_size_limits = BlockSizeLimits::new( - TEST_PROTOCOL_MAX_BLOCK_SIZE, - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - ); - block_size_limits.decrement_block_size(); - assert!(block_size_limits.max_block_size < TEST_PROTOCOL_MAX_BLOCK_SIZE); - } - - #[test] - fn test_max_block_size_floor() { - let mut block_size_limits = BlockSizeLimits::new( - BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR + 1, - TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, - ); - block_size_limits.decrement_block_size(); - assert_eq!( - block_size_limits.max_block_size, - BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR - ); - } -} +// /// [`GlobalState`] represents the internalized state of the Builder service as +// /// represented from its public facing API. +// #[allow(clippy::type_complexity)] +// #[derive(Debug)] +// pub struct GlobalState { +// // data store for the blocks +// pub blocks: lru::LruCache, BlockInfo>, +// +// // registered builder states +// pub spawned_builder_states: HashMap< +// BuilderStateId, +// ( +// // This is provided as an Option for convenience with initialization. +// // When we build the initial state, we don't necessarily want to +// // have to generate a valid ParentBlockReferences object. As doing +// // such would require a bit of setup. Additionally it would +// // result in the call signature to `GlobalState::new` changing. +// // However for every subsequent BuilderState, we expect this value +// // to be populated. +// Option>, +// BroadcastSender>, +// ), +// >, +// +// // builder state -> last built block , it is used to respond the client +// // if the req channel times out during get_available_blocks +// pub builder_state_to_last_built_block: HashMap, ResponseMessage>, +// +// // sending a transaction from the hotshot/private mempool to the builder states +// // NOTE: Currently, we don't differentiate between the transactions from the hotshot and the private mempool +// pub tx_sender: BroadcastSender>>, +// +// // last garbage collected view number +// pub last_garbage_collected_view_num: Types::View, +// +// // highest view running builder task +// pub highest_view_num_builder_id: BuilderStateId, +// +// pub block_size_limits: BlockSizeLimits, +// +// /// Number of nodes. 
+// /// +// /// Initial value may be updated by the `claim_block_with_num_nodes` endpoint. +// pub num_nodes: usize, +// } +// +// /// `GetChannelForMatchingBuilderError` is an error enum that represents the +// /// class of possible errors that can be returned when calling +// /// `get_channel_for_matching_builder_or_highest_view_builder` on a +// /// `GlobalState`. These errors are used for internal representations for +// /// consistency and testing, and do not leak beyond the `GlobalState` API. +// /// As such, they intentionally do not implement traits for serialization. +// #[derive(Debug)] +// pub(crate) enum GetChannelForMatchingBuilderError { +// NoBuilderStateFound, +// } +// +// impl From for BuildError { +// fn from(_error: GetChannelForMatchingBuilderError) -> Self { +// BuildError::Error("No builder state found".to_string()) +// } +// } +// +// impl GlobalState { +// /// Creates a new [`GlobalState`] with the given parameters. +// /// The resulting [`GlobalState`] will have the given +// /// `last_garbage_collected_view_num` as passed. Additionally, the +// /// `highest_view_num_builder_id` will be set to a [`BuilderStateId`] +// /// comprised of the given `bootstrapped_builder_state_id` and +// /// `bootstrapped_view_num`. The `spawned_builder_states` will be created +// /// with a single entry of the same [`BuilderStateId`] and the given +// /// `bootstrap_sender`. +// /// `protocol_max_block_size` is maximum block size allowed by the protocol, +// /// e.g. `chain_config.max_block_size` for espresso-sequencer. +// /// `max_block_size_increment_period` determines the interval between attempts +// /// to increase the builder's block size limit if it is less than the protocol maximum. +// #[allow(clippy::too_many_arguments)] +// pub fn new( +// bootstrap_sender: BroadcastSender>, +// tx_sender: BroadcastSender>>, +// bootstrapped_builder_state_id: VidCommitment, +// bootstrapped_view_num: Types::View, +// last_garbage_collected_view_num: Types::View, +// max_block_size_increment_period: Duration, +// protocol_max_block_size: u64, +// num_nodes: usize, +// ) -> Self { +// let mut spawned_builder_states = HashMap::new(); +// let bootstrap_id = BuilderStateId { +// parent_commitment: bootstrapped_builder_state_id, +// parent_view: bootstrapped_view_num, +// }; +// spawned_builder_states.insert(bootstrap_id.clone(), (None, bootstrap_sender.clone())); +// GlobalState { +// blocks: LruCache::new(NonZeroUsize::new(256).unwrap()), +// spawned_builder_states, +// tx_sender, +// last_garbage_collected_view_num, +// builder_state_to_last_built_block: Default::default(), +// highest_view_num_builder_id: bootstrap_id, +// block_size_limits: BlockSizeLimits::new( +// protocol_max_block_size, +// max_block_size_increment_period, +// ), +// num_nodes, +// } +// } +// +// /// Associates the given [`BuilderStateId`] with +// /// the given [`BroadcastSender`] in the [`GlobalState`]. +// /// +// /// Additionally, if the view of the [`BuilderStateId`] is greater than the +// /// current highest view number, the [`BuilderStateId`] is set as the new +// /// highest view number. +// /// +// /// There is potential here for data loss. Since we just blindly insert +// /// the [`BuilderStateId`] and [`BroadcastSender`] into the hashmap, we could +// /// potentially be overwriting an existing entry. This would result in +// /// the loss of access to a [`BroadcastSender`], and could potentially +// /// result in unexpected behavior. 
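+// // A minimal bootstrap sketch for `GlobalState::new` above, mirroring the
+// // test setup this diff removes (constants come from
+// // `marketplace_builder_shared::testing::constants`; `TestTypes` and the
+// // channel capacities are illustrative, not prescriptive):
+// //
+// // let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+// // let (tx_sender, _) = async_broadcast::broadcast(10);
+// // let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+// // let global_state = GlobalState::<TestTypes>::new(
+// //     bootstrap_sender,
+// //     tx_sender,
+// //     parent_commit,
+// //     ViewNumber::new(0),                   // bootstrapped view
+// //     ViewNumber::new(0),                   // last garbage-collected view
+// //     TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+// //     TEST_PROTOCOL_MAX_BLOCK_SIZE,
+// //     TEST_NUM_NODES_IN_VID_COMPUTATION,    // num_nodes
+// // );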
+// pub fn register_builder_state( +// &mut self, +// parent_id: BuilderStateId, +// built_from_proposed_block: ParentBlockReferences, +// request_sender: BroadcastSender>, +// ) { +// // register the builder state +// let previous_value = self.spawned_builder_states.insert( +// parent_id.clone(), +// (Some(built_from_proposed_block), request_sender), +// ); +// +// if let Some(previous_value) = previous_value { +// tracing::warn!( +// "builder {parent_id} overwrote previous spawned_builder_state entry: {:?}", +// previous_value +// ); +// } +// +// // keep track of the max view number +// if parent_id.parent_view > self.highest_view_num_builder_id.parent_view { +// tracing::info!("registering builder {parent_id} as highest",); +// self.highest_view_num_builder_id = parent_id; +// } else { +// tracing::warn!( +// "builder {parent_id} created; highest registered is {}", +// self.highest_view_num_builder_id, +// ); +// } +// } +// +// /// Ensures that the given [`BuildBlockInfo`]'d id +// /// is within the [`GlobalState`]'s [`blocks`](GlobalState::blocks) LRU Cache. The cache stores the +// /// [`BlockInfo`] associated with the given [`BuildBlockInfo`]'s id. However +// /// if it already exists within the LRU cache, then the `BlockInfo` is not +// /// updated. +// /// +// /// Additionally, the [`BuilderStateId`] is associated with the given +// /// [`ResponseMessage`] in the [`Self::builder_state_to_last_built_block`] hashmap. +// /// +// /// No care or consideration is given to anything that may have been +// /// stored with the same key in the [`Self::builder_state_to_last_built_block`]. +// pub fn update_global_state( +// &mut self, +// state_id: BuilderStateId, +// build_block_info: BuildBlockInfo, +// response_msg: ResponseMessage, +// ) { +// let BuildBlockInfo { +// id, +// block_payload, +// metadata, +// vid_trigger, +// vid_receiver, +// offered_fee, +// truncated, +// .. +// } = build_block_info; +// +// let previous_cache_entry = self.blocks.put( +// id.clone(), +// BlockInfo { +// block_payload, +// metadata, +// vid_trigger: Arc::new(RwLock::new(Some(vid_trigger))), +// vid_receiver: Arc::new(RwLock::new(WaitAndKeep::Wait(vid_receiver))), +// offered_fee, +// truncated, +// }, +// ); +// +// // update the builder state to last built block +// let previous_builder_state_entry = self +// .builder_state_to_last_built_block +// .insert(state_id, response_msg); +// +// if let Some(previous_builder_state_entry) = previous_builder_state_entry { +// tracing::warn!( +// "block {id} overwrote previous block: {:?}. previous cache entry: {:?}", +// previous_builder_state_entry, +// previous_cache_entry +// ); +// } +// } +// +// /// Cleans up the [`GlobalState`] by removing all +// /// `spawned_builder_states` that have been stored, up to a derived +// /// reference view. This cutoff point can be up to the given +// /// `on_decide_view` so long as the provided value is less than or equal +// /// to the `highest_view_num_builder_id`'s view stored on the state. +// /// Beyond that, the state prefers to drop all `spawned_builder_states` +// /// preceding the derived cutoff view. +// /// +// /// In addition the `last_garbage_collected_view_num` is updated to the +// /// target cutoff view number for tracking purposes. The value returned +// /// is the cutoff view number such that the returned value indicates the +// /// point before which everything was cleaned up. 
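+// // `update_global_state` above relies on `lru::LruCache::put` returning the
+// // value it displaces, which is what makes the overwrite warning possible.
+// // A small sketch of that behavior in isolation (capacity and keys are
+// // arbitrary):
+// //
+// // let mut cache = lru::LruCache::new(std::num::NonZeroUsize::new(2).unwrap());
+// // assert_eq!(cache.put("block-a", 1), None);    // fresh insert
+// // assert_eq!(cache.put("block-a", 2), Some(1)); // returns the displaced value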
+//     pub fn remove_handles(&mut self, on_decide_view: Types::View) -> Types::View {
+//         // remove everything from the spawned builder states when view_num <= on_decide_view;
+//         // if we don't have a highest view > decide, use highest view as cutoff.
+//         let cutoff = std::cmp::min(self.highest_view_num_builder_id.parent_view, on_decide_view);
+//         self.spawned_builder_states
+//             .retain(|id, _| id.parent_view >= cutoff);
+//
+//         let cutoff_u64 = cutoff.u64();
+//         let gc_view = if cutoff_u64 > 0 { cutoff_u64 - 1 } else { 0 };
+//
+//         self.last_garbage_collected_view_num = Types::View::new(gc_view);
+//
+//         cutoff
+//     }
+//
+//     // private mempool submit txn
+//     // Currently, we don't differentiate between the transactions from the hotshot and the private mempool
+//     pub async fn submit_client_txns(
+//         &self,
+//         txns: Vec<<Types as NodeType>::Transaction>,
+//     ) -> Vec<Result<Commitment<<Types as NodeType>::Transaction>, BuildError>> {
+//         handle_received_txns(
+//             &self.tx_sender,
+//             txns,
+//             TransactionSource::Private,
+//             self.block_size_limits.max_block_size,
+//         )
+//         .await
+//     }
+//
+//     /// Helper function that attempts to retrieve the broadcast sender for the given
+//     /// [`BuilderStateId`]. If the sender does not exist, it will return the
+//     /// broadcast sender for the highest view number [`BuilderStateId`]
+//     /// instead.
+//     pub(crate) fn get_channel_for_matching_builder_or_highest_view_builder(
+//         &self,
+//         key: &BuilderStateId<Types>,
+//     ) -> Result<&BroadcastSender<MessageType<Types>>, GetChannelForMatchingBuilderError> {
+//         if let Some(id_and_sender) = self.spawned_builder_states.get(key) {
+//             tracing::info!("Got matching builder for parent {}", key);
+//             Ok(&id_and_sender.1)
+//         } else {
+//             tracing::warn!(
+//                 "failed to recover builder for parent {}, using highest view num builder with {}",
+//                 key,
+//                 self.highest_view_num_builder_id,
+//             );
+//             // get the sender for the highest view number builder
+//             self.spawned_builder_states
+//                 .get(&self.highest_view_num_builder_id)
+//                 .map(|(_, sender)| sender)
+//                 .ok_or(GetChannelForMatchingBuilderError::NoBuilderStateFound)
+//         }
+//     }
+//
+//     // check for the existence of the builder state for a view
+//     pub fn check_builder_state_existence_for_a_view(&self, key: &Types::View) -> bool {
+//         // iterate over the spawned builder states and check if the view number exists
+//         self.spawned_builder_states
+//             .iter()
+//             .any(|(id, _)| id.parent_view == *key)
+//     }
+//
+//     pub fn should_view_handle_other_proposals(
+//         &self,
+//         builder_view: &Types::View,
+//         proposal_view: &Types::View,
+//     ) -> bool {
+//         *builder_view == self.highest_view_num_builder_id.parent_view
+//             && !self.check_builder_state_existence_for_a_view(proposal_view)
+//     }
+// }
+//
+// pub struct ProxyGlobalState<Types: NodeType> {
+//     // global state
+//     global_state: Arc<RwLock<GlobalState<Types>>>,
+//
+//     // identity keys for the builder
+//     // This may be the ideal place, as GlobalState interacts with the HotShot
+//     // APIs and can then sign responses as desired
+//     builder_keys: (
+//         Types::BuilderSignatureKey, // pub key
+//         <<Types as NodeType>::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, // private key
+//     ),
+//
+//     // max waiting time to serve first api request
+//     max_api_waiting_time: Duration,
+// }
+//
+// impl<Types: NodeType> ProxyGlobalState<Types> {
+//     pub fn new(
+//         global_state: Arc<RwLock<GlobalState<Types>>>,
+//         builder_keys: (
+//             Types::BuilderSignatureKey,
+//             <<Types as NodeType>::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey,
+//         ),
+//         max_api_waiting_time: Duration,
+//     ) -> Self {
+//         ProxyGlobalState {
+//             global_state,
+//             builder_keys,
+//             max_api_waiting_time,
+//         }
+//     }
+// }
+//
+//
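+// // The cutoff rule in `remove_handles` above, reduced to plain integers as a
+// // sketch: the cutoff never exceeds the highest registered builder view, and
+// // every builder state strictly below it is pruned.
+// //
+// // fn cutoff(highest_builder_view: u64, on_decide_view: u64) -> u64 {
+// //     std::cmp::min(highest_builder_view, on_decide_view)
+// // }
+// // assert_eq!(cutoff(10, 7), 7); // normal decide: prune views < 7
+// // assert_eq!(cutoff(5, 9), 5);  // decide past highest builder: clamp to 5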
+// /// `AvailableBlocksError` is an error enum that represents the class of possible
+// /// errors that can be returned when calling `available_blocks` on a
+// /// `ProxyGlobalState`. These errors are used for internal representations
+// /// for consistency and testing, and do not leak beyond the `ProxyGlobalState`
+// /// API. As such, they intentionally do not implement traits for serialization.
+// #[derive(Debug)]
+// enum AvailableBlocksError<Types: NodeType> {
+//     SignatureValidationFailed,
+//     RequestForAvailableViewThatHasAlreadyBeenDecided,
+//     SigningBlockFailed(
+//         <<Types as NodeType>::BuilderSignatureKey as BuilderSignatureKey>::SignError,
+//     ),
+//     GetChannelForMatchingBuilderError(GetChannelForMatchingBuilderError),
+//     NoBlocksAvailable,
+//     ChannelUnexpectedlyClosed,
+// }
+//
+// impl<Types: NodeType> From<GetChannelForMatchingBuilderError> for AvailableBlocksError<Types> {
+//     fn from(error: GetChannelForMatchingBuilderError) -> Self {
+//         AvailableBlocksError::GetChannelForMatchingBuilderError(error)
+//     }
+// }
+//
+// impl<Types: NodeType> From<AvailableBlocksError<Types>> for BuildError {
+//     fn from(error: AvailableBlocksError<Types>) -> Self {
+//         match error {
+//             AvailableBlocksError::SignatureValidationFailed => {
+//                 BuildError::Error("Signature validation failed in get_available_blocks".to_string())
+//             }
+//             AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided => {
+//                 BuildError::Error(
+//                     "Request for available blocks for a view that has already been decided."
+//                         .to_string(),
+//                 )
+//             }
+//             AvailableBlocksError::SigningBlockFailed(e) => {
+//                 BuildError::Error(format!("Signing over block info failed: {:?}", e))
+//             }
+//             AvailableBlocksError::GetChannelForMatchingBuilderError(e) => e.into(),
+//             AvailableBlocksError::NoBlocksAvailable => {
+//                 BuildError::Error("No blocks available".to_string())
+//             }
+//             AvailableBlocksError::ChannelUnexpectedlyClosed => {
+//                 BuildError::Error("Channel unexpectedly closed".to_string())
+//             }
+//         }
+//     }
+// }
+//
+// /// `ClaimBlockError` is an error enum that represents the class of possible
+// /// errors that can be returned when calling `claim_block` on a
+// /// `ProxyGlobalState`. These errors are used for internal representations
+// /// for consistency and testing, and do not leak beyond the `ProxyGlobalState`
+// /// API. As such, they intentionally do not implement traits for serialization.
+// #[derive(Debug)] +// enum ClaimBlockError { +// SignatureValidationFailed, +// SigningCommitmentFailed( +// <::BuilderSignatureKey as BuilderSignatureKey>::SignError, +// ), +// BlockDataNotFound, +// } +// +// impl From> for BuildError { +// fn from(error: ClaimBlockError) -> Self { +// match error { +// ClaimBlockError::SignatureValidationFailed => { +// BuildError::Error("Signature validation failed in claim block".to_string()) +// } +// ClaimBlockError::SigningCommitmentFailed(e) => { +// BuildError::Error(format!("Signing over builder commitment failed: {:?}", e)) +// } +// ClaimBlockError::BlockDataNotFound => { +// BuildError::Error("Block data not found".to_string()) +// } +// } +// } +// } +// +// #[derive(Debug)] +// enum ClaimBlockHeaderInputError { +// SignatureValidationFailed, +// BlockHeaderNotFound, +// CouldNotGetVidInTime, +// WaitAndKeepGetError(WaitAndKeepGetError), +// FailedToSignVidCommitment( +// <::BuilderSignatureKey as BuilderSignatureKey>::SignError, +// ), +// FailedToSignFeeInfo( +// <::BuilderSignatureKey as BuilderSignatureKey>::SignError, +// ), +// } +// +// impl From> for BuildError { +// fn from(error: ClaimBlockHeaderInputError) -> Self { +// match error { +// ClaimBlockHeaderInputError::SignatureValidationFailed => BuildError::Error( +// "Signature validation failed in claim block header input".to_string(), +// ), +// ClaimBlockHeaderInputError::BlockHeaderNotFound => { +// BuildError::Error("Block header not found".to_string()) +// } +// ClaimBlockHeaderInputError::CouldNotGetVidInTime => { +// BuildError::Error("Couldn't get vid in time".to_string()) +// } +// ClaimBlockHeaderInputError::WaitAndKeepGetError(e) => e.into(), +// ClaimBlockHeaderInputError::FailedToSignVidCommitment(e) => { +// BuildError::Error(format!("Failed to sign VID commitment: {:?}", e)) +// } +// ClaimBlockHeaderInputError::FailedToSignFeeInfo(e) => { +// BuildError::Error(format!("Failed to sign fee info: {:?}", e)) +// } +// } +// } +// } +// +// impl ProxyGlobalState { +// async fn available_blocks_implementation( +// &self, +// for_parent: &VidCommitment, +// view_number: u64, +// sender: Types::SignatureKey, +// signature: &::PureAssembledSignatureType, +// ) -> Result>, AvailableBlocksError> { +// let starting_time = Instant::now(); +// +// let state_id = BuilderStateId { +// parent_commitment: *for_parent, +// parent_view: Types::View::new(view_number), +// }; +// +// // verify the signature +// if !sender.validate(signature, state_id.parent_commitment.as_ref()) { +// tracing::error!("Signature validation failed in get_available_blocks"); +// return Err(AvailableBlocksError::SignatureValidationFailed); +// } +// +// tracing::info!("Requesting available blocks for {state_id}",); +// +// let view_num = state_id.parent_view; +// // check in the local spawned builder states +// // if it doesn't exist; there are three cases +// // 1) it has already been garbage collected (view < decide) and we should return an error +// // 2) it has not yet been created, and we should try to wait +// // 3) we missed the triggering event, and should use the BuilderState with the highest available view +// +// { +// // 1st case: Decide event received, and not bootstrapping. +// // If this `BlockBuilder` hasn't been reaped, it should have been. 
+// let global_state = self.global_state.read_arc().await; +// if view_num < global_state.last_garbage_collected_view_num +// && global_state.highest_view_num_builder_id.parent_view +// != global_state.last_garbage_collected_view_num +// { +// tracing::warn!( +// "Requesting for view {:?}, last decide-triggered cleanup on view {:?}, highest view num is {:?}", +// view_num, +// global_state.last_garbage_collected_view_num, +// global_state.highest_view_num_builder_id.parent_view +// ); +// return Err(AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided); +// } +// } +// +// let (response_sender, response_receiver) = unbounded(); +// let req_msg = RequestMessage { +// state_id: state_id.clone(), +// response_channel: response_sender, +// }; +// let timeout_after = starting_time + self.max_api_waiting_time; +// let check_duration = self.max_api_waiting_time / 10; +// +// let time_to_wait_for_matching_builder = starting_time + self.max_api_waiting_time / 2; +// +// let mut sent = false; +// while Instant::now() < time_to_wait_for_matching_builder { +// // try to broadcast the request to the correct builder state +// let found_builder_state = { +// let global_state_read_lock_guard = self.global_state.read_arc().await; +// +// global_state_read_lock_guard +// .spawned_builder_states +// .get(&state_id) +// .cloned() +// }; +// +// if let Some(id_and_sender) = found_builder_state { +// tracing::info!( +// "Got matching BlockBuilder for {state_id}, sending get_available_blocks request", +// ); +// +// if let Err(e) = id_and_sender +// .1 +// .broadcast(MessageType::RequestMessage(req_msg.clone())) +// .await +// { +// tracing::warn!("Error {e} sending get_available_blocks request for {state_id}",); +// } +// sent = true; +// break; +// } +// +// tracing::info!("Failed to get matching BlockBuilder for {state_id}, will try again",); +// async_sleep(check_duration).await; +// } +// +// if !sent { +// // broadcast the request to the best fallback builder state +// if let Err(e) = self +// .global_state +// .read_arc() +// .await +// .get_channel_for_matching_builder_or_highest_view_builder(&state_id)? 
+// .broadcast(MessageType::RequestMessage(req_msg.clone())) +// .await +// { +// tracing::warn!( +// "Error {e} sending get_available_blocks request for parent {state_id}", +// ); +// } +// } +// +// tracing::debug!("Waiting for response for get_available_blocks with parent {state_id}",); +// +// let response_received = loop { +// match async_timeout(check_duration, response_receiver.recv()).await { +// Err(toe) => { +// if Instant::now() >= timeout_after { +// tracing::debug!(%toe, "Couldn't get available blocks in time for parent {state_id}"); +// // lookup into the builder_state_to_last_built_block, if it contains the result, return that otherwise return error +// if let Some(last_built_block) = self +// .global_state +// .read_arc() +// .await +// .builder_state_to_last_built_block +// .get(&state_id) +// { +// tracing::info!("Returning last built block for parent {state_id}",); +// break Ok(last_built_block.clone()); +// } +// break Err(AvailableBlocksError::NoBlocksAvailable); +// } +// continue; +// } +// Ok(recv_attempt) => { +// if let Err(ref e) = recv_attempt { +// tracing::error!(%e, "Channel closed while getting available blocks for parent {state_id}"); +// } +// break recv_attempt +// .map_err(|_| AvailableBlocksError::ChannelUnexpectedlyClosed); +// } +// } +// }; +// +// match response_received { +// Ok(response) => { +// let (pub_key, sign_key) = self.builder_keys.clone(); +// // sign over the block info +// let signature_over_block_info = +// ::BuilderSignatureKey::sign_block_info( +// &sign_key, +// response.block_size, +// response.offered_fee, +// &response.builder_hash, +// ) +// .map_err(AvailableBlocksError::SigningBlockFailed)?; +// +// // insert the block info into local hashmap +// let initial_block_info = AvailableBlockInfo:: { +// block_hash: response.builder_hash.clone(), +// block_size: response.block_size, +// offered_fee: response.offered_fee, +// signature: signature_over_block_info, +// sender: pub_key.clone(), +// _phantom: Default::default(), +// }; +// tracing::info!( +// "Sending available Block info response for {state_id} with block hash: {:?}", +// response.builder_hash +// ); +// Ok(vec![initial_block_info]) +// } +// +// // We failed to get available blocks +// Err(e) => { +// tracing::debug!("Failed to get available blocks for parent {state_id}",); +// Err(e) +// } +// } +// } +// +// async fn claim_block_implementation( +// &self, +// block_hash: &BuilderCommitment, +// view_number: u64, +// sender: Types::SignatureKey, +// signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, +// ) -> Result, ClaimBlockError> { +// let block_id = BlockId { +// hash: block_hash.clone(), +// view: Types::View::new(view_number), +// }; +// +// tracing::info!("Received request for claiming block {block_id}",); +// // verify the signature +// if !sender.validate(signature, block_id.hash.as_ref()) { +// tracing::error!("Signature validation failed in claim block"); +// return Err(ClaimBlockError::SignatureValidationFailed); +// } +// let (pub_key, sign_key) = self.builder_keys.clone(); +// +// let extracted_block_info_option = { +// // We store this write lock guard separately to make it explicit +// // that this will end up holding a lock for the duration of this +// // closure. +// // +// // Additionally, we clone the properties from the block_info that +// // end up being cloned if found anyway. Since we know this already +// // we can perform the clone here to avoid holding the lock for +// // longer than needed. 
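+// // A distilled sketch of the lock-narrowing idiom described in the comment
+// // above: take the write lock in an inner scope, clone only the fields that
+// // are needed, and let the guard drop before any awaited work. (The fields
+// // shown are a subset of `BlockInfo`, chosen for brevity.)
+// //
+// // let extracted = {
+// //     let mut guard = self.global_state.write_arc().await;
+// //     guard
+// //         .blocks
+// //         .get(&block_id)
+// //         .map(|info| (info.offered_fee, info.truncated))
+// // }; // guard is dropped here; nothing below holds the lock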
+//         let mut global_state_write_lock_guard = self.global_state.write_arc().await;
+//         let block_info_some = global_state_write_lock_guard.blocks.get(&block_id);
+//
+//         block_info_some.map(|block_info| {
+//             (
+//                 block_info.vid_trigger.clone(),
+//                 block_info.block_payload.clone(),
+//                 block_info.metadata.clone(),
+//             )
+//         })
+//     };
+//
+//     if let Some((vid_trigger, block_payload, metadata)) = extracted_block_info_option {
+//         tracing::info!("Trying to send vid trigger info for {block_id}",);
+//
+//         if let Some(trigger_writer) = vid_trigger.write().await.take() {
+//             tracing::info!("Sending vid trigger for {block_id}");
+//             trigger_writer.send(TriggerStatus::Start);
+//             tracing::info!("Sent vid trigger for {block_id}");
+//         }
+//         tracing::info!("Done trying to send vid trigger info for {block_id}",);
+//
+//         // sign over the builder commitment, as the proposer can compute it
+//         // based on the provided block_payload and the metadata
+//         let response_block_hash = block_payload.builder_commitment(&metadata);
+//         let signature_over_builder_commitment =
+//             <Types as NodeType>::BuilderSignatureKey::sign_builder_message(
+//                 &sign_key,
+//                 response_block_hash.as_ref(),
+//             )
+//             .map_err(ClaimBlockError::SigningCommitmentFailed)?;
+//
+//         let block_data = AvailableBlockData::<Types> {
+//             block_payload: block_payload.clone(),
+//             metadata: metadata.clone(),
+//             signature: signature_over_builder_commitment,
+//             sender: pub_key.clone(),
+//         };
+//         tracing::info!("Sending Claim Block data for {block_id}",);
+//         Ok(block_data)
+//     } else {
+//         tracing::warn!("Claim Block not found");
+//         Err(ClaimBlockError::BlockDataNotFound)
+//     }
+// }
+//
+// async fn claim_block_header_input_implementation(
+//     &self,
+//     block_hash: &BuilderCommitment,
+//     view_number: u64,
+//     sender: Types::SignatureKey,
+//     signature: &<<Types as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
+// ) -> Result<AvailableBlockHeaderInput<Types>, ClaimBlockHeaderInputError<Types>> {
+//     let id = BlockId {
+//         hash: block_hash.clone(),
+//         view: Types::View::new(view_number),
+//     };
+//
+//     tracing::info!("Received request for claiming block header input for block {id}");
+//     // verify the signature
+//     if !sender.validate(signature, id.hash.as_ref()) {
+//         tracing::error!("Signature validation failed in claim block header input");
+//         return Err(ClaimBlockHeaderInputError::SignatureValidationFailed);
+//     }
+//     let (pub_key, sign_key) = self.builder_keys.clone();
+//
+//     let extracted_block_info_option = {
+//         // We store this write lock guard separately to make it explicit
+//         // that this will end up holding a lock for the duration of this
+//         // closure.
+//         //
+//         // Additionally, we clone the properties from the block_info that
+//         // end up being cloned if found anyway. Since we know this already
+//         // we can perform the clone here to avoid holding the lock for
+//         // longer than needed.
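+// // Both the available-blocks wait earlier and the VID wait below share one
+// // deadline-slicing shape; a sketch of it in isolation (`receiver` and the
+// // bindings are illustrative; `async_timeout` is the same helper used here):
+// //
+// // let deadline = Instant::now() + max_api_waiting_time;
+// // let check_duration = max_api_waiting_time / 10;
+// // let outcome = loop {
+// //     match async_timeout(check_duration, receiver.recv()).await {
+// //         Ok(received) => break Some(received),               // inner future finished
+// //         Err(_) if Instant::now() >= deadline => break None, // overall deadline exceeded
+// //         Err(_) => continue,                                 // one slice elapsed; keep waiting
+// //     }
+// // };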
+// let mut global_state_write_lock_guard = self.global_state.write_arc().await; +// let block_info_some = global_state_write_lock_guard.blocks.get(&id); +// +// block_info_some.map(|block_info| { +// ( +// block_info.vid_receiver.clone(), +// block_info.metadata.clone(), +// block_info.offered_fee, +// block_info.truncated, +// ) +// }) +// }; +// +// if let Some((vid_receiver, metadata, offered_fee, truncated)) = extracted_block_info_option +// { +// tracing::info!("Waiting for vid commitment for block {id}"); +// +// let timeout_after = Instant::now() + self.max_api_waiting_time; +// let check_duration = self.max_api_waiting_time / 10; +// +// let response_received = loop { +// match async_timeout(check_duration, vid_receiver.write().await.get()).await { +// Err(_toe) => { +// if Instant::now() >= timeout_after { +// tracing::warn!("Couldn't get vid commitment in time for block {id}",); +// { +// // we can't keep up with this block size, reduce max block size +// self.global_state +// .write_arc() +// .await +// .block_size_limits +// .decrement_block_size(); +// } +// break Err(ClaimBlockHeaderInputError::CouldNotGetVidInTime); +// } +// continue; +// } +// Ok(recv_attempt) => { +// if recv_attempt.is_err() { +// tracing::error!( +// "Channel closed while getting vid commitment for block {id}", +// ); +// } +// break recv_attempt +// .map_err(ClaimBlockHeaderInputError::WaitAndKeepGetError); +// } +// } +// }; +// +// tracing::info!("Got vid commitment for block {id}",); +// +// // We got VID in time with margin left. +// // Maybe we can handle bigger blocks? +// if timeout_after.duration_since(Instant::now()) +// > self.max_api_waiting_time / VID_RESPONSE_TARGET_MARGIN_DIVISOR +// { +// // Increase max block size +// self.global_state +// .write_arc() +// .await +// .block_size_limits +// .try_increment_block_size(truncated); +// } +// +// match response_received { +// Ok((vid_commitment, vid_precompute_data)) => { +// // sign over the vid commitment +// let signature_over_vid_commitment = +// ::BuilderSignatureKey::sign_builder_message( +// &sign_key, +// vid_commitment.as_ref(), +// ) +// .map_err(ClaimBlockHeaderInputError::FailedToSignVidCommitment)?; +// +// let signature_over_fee_info = Types::BuilderSignatureKey::sign_fee( +// &sign_key, +// offered_fee, +// &metadata, +// &vid_commitment, +// ) +// .map_err(ClaimBlockHeaderInputError::FailedToSignFeeInfo)?; +// +// let response = AvailableBlockHeaderInput:: { +// vid_commitment, +// vid_precompute_data, +// fee_signature: signature_over_fee_info, +// message_signature: signature_over_vid_commitment, +// sender: pub_key.clone(), +// }; +// tracing::info!("Sending Claim Block Header Input response for {id}",); +// Ok(response) +// } +// Err(err) => { +// tracing::warn!("Claim Block Header Input not found"); +// Err(err) +// } +// } +// } else { +// tracing::warn!("Claim Block Header Input not found"); +// Err(ClaimBlockHeaderInputError::BlockHeaderNotFound) +// } +// } +// } +// +// /* +// Handling Builder API responses +// */ +// #[async_trait] +// impl BuilderDataSource for ProxyGlobalState +// where +// for<'a> <::PureAssembledSignatureType as TryFrom< +// &'a TaggedBase64, +// >>::Error: Display, +// for<'a> >::Error: Display, +// { +// async fn available_blocks( +// &self, +// for_parent: &VidCommitment, +// view_number: u64, +// sender: Types::SignatureKey, +// signature: &::PureAssembledSignatureType, +// ) -> Result>, BuildError> { +// Ok(self +// .available_blocks_implementation(for_parent, view_number, sender, 
+//             signature)
+//             .await?)
+//     }
+//
+//     async fn claim_block(
+//         &self,
+//         block_hash: &BuilderCommitment,
+//         view_number: u64,
+//         sender: Types::SignatureKey,
+//         signature: &<<Types as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
+//     ) -> Result<AvailableBlockData<Types>, BuildError> {
+//         Ok(self
+//             .claim_block_implementation(block_hash, view_number, sender, signature)
+//             .await?)
+//     }
+//
+//     async fn claim_block_with_num_nodes(
+//         &self,
+//         block_hash: &BuilderCommitment,
+//         view_number: u64,
+//         sender: <Types as NodeType>::SignatureKey,
+//         signature: &<<Types as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
+//         num_nodes: usize,
+//     ) -> Result<AvailableBlockData<Types>, BuildError> {
+//         // Update the stored `num_nodes` with the given value, which will be used for VID computation.
+//         self.global_state.write_arc().await.num_nodes = num_nodes;
+//
+//         self.claim_block(block_hash, view_number, sender, signature)
+//             .await
+//     }
+//
+//     async fn claim_block_header_input(
+//         &self,
+//         block_hash: &BuilderCommitment,
+//         view_number: u64,
+//         sender: Types::SignatureKey,
+//         signature: &<<Types as NodeType>::SignatureKey as SignatureKey>::PureAssembledSignatureType,
+//     ) -> Result<AvailableBlockHeaderInput<Types>, BuildError> {
+//         Ok(self
+//             .claim_block_header_input_implementation(block_hash, view_number, sender, signature)
+//             .await?)
+//     }
+//
+//     /// Returns the public key of the builder
+//     async fn builder_address(
+//         &self,
+//     ) -> Result<<Types as NodeType>::BuilderSignatureKey, BuildError> {
+//         Ok(self.builder_keys.0.clone())
+//     }
+// }
+//
+// #[async_trait]
+// impl<Types: NodeType> AcceptsTxnSubmits<Types> for ProxyGlobalState<Types> {
+//     async fn submit_txns(
+//         &self,
+//         txns: Vec<<Types as NodeType>::Transaction>,
+//     ) -> Result<Vec<Commitment<<Types as NodeType>::Transaction>>, BuildError> {
+//         tracing::debug!(
+//             "Submitting {:?} transactions to the builder states: {:?}",
+//             txns.len(),
+//             txns.iter().map(|txn| txn.commit()).collect::<Vec<_>>()
+//         );
+//         let response = self
+//             .global_state
+//             .read_arc()
+//             .await
+//             .submit_client_txns(txns)
+//             .await;
+//
+//         tracing::debug!(
+//             "Transaction submitted to the builder states, sending response: {:?}",
+//             response
+//         );
+//
+//         // NOTE: ideally we want to respond with the original Vec<Result>
+//         // instead of Result<Vec> so as not to lose any information,
+//         // but this requires changes to the builder API
+//         response.into_iter().collect()
+//     }
+// }
+// #[async_trait]
+// impl<Types: NodeType> ReadState for ProxyGlobalState<Types> {
+//     type State = ProxyGlobalState<Types>;
+//
+//     async fn read<T>(
+//         &self,
+//         op: impl Send + for<'a> FnOnce(&'a Self::State) -> BoxFuture<'a, T> + 'async_trait,
+//     ) -> T {
+//         op(self).await
+//     }
+// }
+//
+// /*
+// Running Non-Permissioned Builder Service
+// */
+// pub async fn run_non_permissioned_standalone_builder_service<
+//     Types: NodeType,
+//     Ver: StaticVersionType,
+//     S: Stream<Item = Event<Types>> + Unpin,
+// >(
+//     // sending a DA proposal from the hotshot to the builder states
+//     da_sender: BroadcastSender<MessageType<Types>>,
+//
+//     // sending a Quorum proposal from the hotshot to the builder states
+//     quorum_sender: BroadcastSender<MessageType<Types>>,
+//
+//     // sending a Decide event from the hotshot to the builder states
+//     decide_sender: BroadcastSender<MessageType<Types>>,
+//
+//     // HotShot event stream
+//     hotshot_event_stream: S,
+//
+//     // Global state
+//     global_state: Arc<RwLock<GlobalState<Types>>>,
+// ) -> Result<(), anyhow::Error> {
+//     let tx_sender = {
+//         // This closure is likely unnecessary, but we want to play it safe
+//         // with our RWLocks.
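+// // What `response.into_iter().collect()` in `submit_txns` above does, as a
+// // sketch: `Result` implements `FromIterator`, so collecting stops at the
+// // first `Err`, which is exactly the information loss the NOTE mentions.
+// //
+// // let ok: Result<Vec<u8>, &str> = vec![Ok(1), Ok(2)].into_iter().collect();
+// // assert_eq!(ok, Ok(vec![1, 2]));
+// // let err: Result<Vec<u8>, &str> = vec![Ok(1), Err("boom")].into_iter().collect();
+// // assert_eq!(err, Err("boom")); // the Ok(1) commitment is dropped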
+// let global_state_read_lock_guard = global_state.read_arc().await; +// global_state_read_lock_guard.tx_sender.clone() +// }; +// let mut hotshot_event_stream = std::pin::pin!(hotshot_event_stream); +// +// loop { +// let Some(event) = hotshot_event_stream.next().await else { +// anyhow::bail!("Event stream ended"); +// }; +// +// match event.event { +// EventType::Error { error } => { +// tracing::error!("Error event in HotShot: {:?}", error); +// } +// // tx event +// EventType::Transactions { transactions } => { +// let max_block_size = { +// // This closure is likely unnecessary, but we want +// // to play it safe with our RWLocks. +// let global_state_read_lock_guard = global_state.read_arc().await; +// global_state_read_lock_guard +// .block_size_limits +// .max_block_size +// }; +// +// handle_received_txns( +// &tx_sender, +// transactions, +// TransactionSource::Public, +// max_block_size, +// ) +// .await; +// } +// // decide event +// EventType::Decide { +// block_size: _, +// leaf_chain, +// qc: _, +// } => { +// let latest_decide_view_num = leaf_chain[0].leaf.view_number(); +// handle_decide_event(&decide_sender, latest_decide_view_num).await; +// } +// // DA proposal event +// EventType::DaProposal { proposal, sender } => { +// handle_da_event(&da_sender, Arc::new(proposal), sender).await; +// } +// // QC proposal event +// EventType::QuorumProposal { proposal, sender } => { +// // get the leader for current view +// handle_quorum_event(&quorum_sender, Arc::new(proposal), sender).await; +// } +// _ => { +// tracing::debug!("Unhandled event from Builder"); +// } +// } +// } +// } +// +// /// [`HandleDaEventError`] represents the internal class of errors that can +// /// occur when attempting to process an incoming da proposal event. More +// /// specifically these are the class of error that can be returned from +// /// [`handle_da_event_implementation`]. +// #[derive(Debug)] +// enum HandleDaEventError { +// SignatureValidationFailed, +// BroadcastFailed(async_broadcast::SendError>), +// } +// +// /// [`handle_da_event`] is a utility function that will attempt to broadcast the +// /// given `da_proposal` to the given `da_channel_sender` if the given details +// /// pass validation checks, and the [`BroadcastSender`] `da_channel_sender` is +// /// still open. +// async fn handle_da_event( +// da_channel_sender: &BroadcastSender>, +// da_proposal: Arc>>, +// sender: ::SignatureKey, +// ) { +// // We're explicitly not inspecting this error, as this function is not +// // expected to return an error or any indication of an error. +// let _ = handle_da_event_implementation(da_channel_sender, da_proposal, sender).await; +// } +// +// /// [`handle_da_event_implementation`] is a utility function that will attempt +// /// to broadcast the given `da_proposal` to the given `da_channel_sender` if the +// /// given details pass all relevant checks. +// /// +// /// There are only three conditions under which this will fail to send the +// /// message via the given `da_channel_sender`, and they are all represented +// /// via [`HandleDaEventError`]. They are as follows: +// /// - [`HandleDaEventError::SignatureValidationFailed`]: The signature validation failed +// /// - [`HandleDaEventError::BroadcastFailed`]: The broadcast failed as no receiver +// /// is in place to receive the message +// /// +// /// This function is the implementation for [`handle_da_event`]. 
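+// // How a leader produces a signature that passes the check in the function
+// // below, taken from the tests this diff removes (`BLSPubKey` is the test
+// // signature scheme; `sender_private_key` must belong to `sender`):
+// //
+// // let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions);
+// // let signature =
+// //     <BLSPubKey as SignatureKey>::sign(&sender_private_key, &encoded_txns_hash).unwrap();
+// // // Signing with any other key makes the function return
+// // // `HandleDaEventError::SignatureValidationFailed`.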
+// async fn handle_da_event_implementation( +// da_channel_sender: &BroadcastSender>, +// da_proposal: Arc>>, +// sender: ::SignatureKey, +// ) -> Result<(), HandleDaEventError> { +// tracing::debug!( +// "DaProposal: Leader: {:?} for the view: {:?}", +// sender, +// da_proposal.data.view_number +// ); +// +// // get the encoded transactions hash +// let encoded_txns_hash = Sha256::digest(&da_proposal.data.encoded_transactions); +// // check if the sender is the leader and the signature is valid; if yes, broadcast the DA proposal +// +// if !sender.validate(&da_proposal.signature, &encoded_txns_hash) { +// tracing::error!( +// "Validation Failure on DaProposal for view {:?}: Leader: {:?}", +// da_proposal.data.view_number, +// sender +// ); +// return Err(HandleDaEventError::SignatureValidationFailed); +// } +// +// let da_msg = DaProposalMessage:: { +// proposal: da_proposal, +// sender, +// }; +// +// let view_number = da_msg.proposal.data.view_number; +// tracing::debug!( +// "Sending DA proposal to the builder states for view {:?}", +// view_number +// ); +// +// if let Err(e) = da_channel_sender +// .broadcast(MessageType::DaProposalMessage(da_msg)) +// .await +// { +// tracing::warn!( +// "Error {e}, failed to send DA proposal to builder states for view {:?}", +// view_number +// ); +// +// return Err(HandleDaEventError::BroadcastFailed(e)); +// } +// +// Ok(()) +// } +// +// /// [`HandleQuorumEventError`] represents the internal class of errors that can +// /// occur when attempting to process an incoming quorum proposal event. More +// /// specifically these are the class of error that can be returned from +// /// [`handle_quorum_event_implementation`]. +// #[derive(Debug)] +// enum HandleQuorumEventError { +// SignatureValidationFailed, +// BroadcastFailed(async_broadcast::SendError>), +// } +// +// /// [`handle_quorum_event`] is a utility function that will attempt to broadcast the +// /// given `quorum_proposal` to the given `quorum_channel_sender` if the given details +// /// pass validation checks, and the [`BroadcastSender`] `quorum_channel_sender` is +// /// still open. +// async fn handle_quorum_event( +// quorum_channel_sender: &BroadcastSender>, +// quorum_proposal: Arc>>, +// sender: ::SignatureKey, +// ) { +// // We're explicitly not inspecting this error, as this function is not +// // expected to return an error or any indication of an error. +// let _ = +// handle_quorum_event_implementation(quorum_channel_sender, quorum_proposal, sender).await; +// } +// +// /// Utility function that will attempt to broadcast the given `quorum_proposal` +// /// to the given `quorum_channel_sender` if the given details pass all relevant checks. +// /// +// /// There are only three conditions under which this will fail to send the +// /// message via the given `quorum_channel_sender`, and they are all represented +// /// via [`HandleQuorumEventError`]. They are as follows: +// /// - [`HandleQuorumEventError::SignatureValidationFailed`]: The signature validation failed +// /// - [`HandleQuorumEventError::BroadcastFailed`]: The broadcast failed as no receiver +// /// is in place to receive the message +// /// +// /// This function is the implementation for [`handle_quorum_event`]. 
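+// // The quorum counterpart signs over the legacy commitment of the leaf
+// // derived from the proposal rather than a transaction hash, again mirroring
+// // the removed tests (`BLSPubKey` is the test signature scheme):
+// //
+// // let leaf = Leaf::from_quorum_proposal(&quorum_proposal);
+// // let signature = <BLSPubKey as SignatureKey>::sign(
+// //     &sender_private_key,
+// //     leaf.legacy_commit().as_ref(),
+// // )
+// // .unwrap();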
+// async fn handle_quorum_event_implementation<Types: NodeType>(
+//     quorum_channel_sender: &BroadcastSender<MessageType<Types>>,
+//     quorum_proposal: Arc<Proposal<Types, QuorumProposal<Types>>>,
+//     sender: <Types as NodeType>::SignatureKey,
+// ) -> Result<(), HandleQuorumEventError<Types>> {
+//     tracing::debug!(
+//         "QuorumProposal: Leader: {:?} for the view: {:?}",
+//         sender,
+//         quorum_proposal.data.view_number
+//     );
+//
+//     let leaf = Leaf::from_quorum_proposal(&quorum_proposal.data);
+//
+//     if !sender.validate(&quorum_proposal.signature, leaf.legacy_commit().as_ref()) {
+//         tracing::error!(
+//             "Validation Failure on QuorumProposal for view {:?}: Leader for the current view: {:?}",
+//             quorum_proposal.data.view_number,
+//             sender
+//         );
+//         return Err(HandleQuorumEventError::SignatureValidationFailed);
+//     }
+//
+//     let quorum_msg = QuorumProposalMessage::<Types> {
+//         proposal: quorum_proposal,
+//         sender,
+//     };
+//     let view_number = quorum_msg.proposal.data.view_number;
+//     tracing::debug!(
+//         "Sending Quorum proposal to the builder states for view {:?}",
+//         view_number
+//     );
+//
+//     if let Err(e) = quorum_channel_sender
+//         .broadcast(MessageType::QuorumProposalMessage(quorum_msg))
+//         .await
+//     {
+//         tracing::warn!(
+//             "Error {e}, failed to send Quorum proposal to builder states for view {:?}",
+//             view_number
+//         );
+//         return Err(HandleQuorumEventError::BroadcastFailed(e));
+//     }
+//
+//     Ok(())
+// }
+//
+// async fn handle_decide_event<Types: NodeType>(
+//     decide_channel_sender: &BroadcastSender<MessageType<Types>>,
+//     latest_decide_view_number: Types::View,
+// ) {
+//     let decide_msg: DecideMessage<Types> = DecideMessage::<Types> {
+//         latest_decide_view_number,
+//     };
+//     tracing::debug!(
+//         "Sending Decide event to builder states for view {:?}",
+//         latest_decide_view_number
+//     );
+//     if let Err(e) = decide_channel_sender
+//         .broadcast(MessageType::DecideMessage(decide_msg))
+//         .await
+//     {
+//         tracing::warn!(
+//             "Error {e}, failed to send Decide event to builder states for view {:?}",
+//             latest_decide_view_number
+//         );
+//     }
+// }
+//
+// #[derive(Debug)]
+// enum HandleReceivedTxnsError<Types: NodeType> {
+//     TransactionTooBig {
+//         estimated_length: u64,
+//         max_txn_len: u64,
+//     },
+//
+//     TooManyTransactions,
+//
+//     Internal(TrySendError<Arc<ReceivedTransaction<Types>>>),
+// }
+//
+// impl<Types: NodeType> From<HandleReceivedTxnsError<Types>> for BuildError {
+//     fn from(error: HandleReceivedTxnsError<Types>) -> Self {
+//         match error {
+//             HandleReceivedTxnsError::TransactionTooBig {
+//                 estimated_length,
+//                 max_txn_len,
+//             } => BuildError::Error(format!("Transaction too big (estimated length {estimated_length}, currently accepting <= {max_txn_len})")),
+//             HandleReceivedTxnsError::TooManyTransactions => BuildError::Error("Too many transactions".to_owned()),
+//             HandleReceivedTxnsError::Internal(err) => BuildError::Error(format!("Internal error when submitting transaction: {}", err)),
+//         }
+//     }
+// }
+//
+// impl<Types: NodeType> From<TrySendError<Arc<ReceivedTransaction<Types>>>>
+//     for HandleReceivedTxnsError<Types>
+// {
+//     fn from(err: TrySendError<Arc<ReceivedTransaction<Types>>>) -> Self {
+//         match err {
+//             TrySendError::Full(_) => HandleReceivedTxnsError::TooManyTransactions,
+//             err => HandleReceivedTxnsError::Internal(err),
+//         }
+//     }
+// }
+//
+// /// Utility function that will take the given list
+// /// of transactions, `txns`, wrap them in a [`ReceivedTransaction`] struct,
+// /// and attempt to broadcast them to the given transaction [`BroadcastSender`]
+// /// `tx_sender`. The broadcast itself is a non-blocking operation, and any
+// /// failures of the broadcast are collected into the returned vector
+// /// of [Result]s.
+// /// Utility function that will take the given list
+// /// of transactions, `txns`, wrap them in a [`ReceivedTransaction`] struct
+// /// and attempt to broadcast them to the given transaction [`BroadcastSender`]
+// /// `tx_sender`. The broadcast itself is a non-blocking operation, and any
+// /// failures of the broadcast are collected into the returned vector
+// /// of [`Result`]s.
+// ///
+// /// There is also a `max_txn_len` parameter that is used to ensure
+// /// that transactions exceeding this threshold will not be broadcast.
+// pub(crate) async fn handle_received_txns<Types: NodeType>(
+//     tx_sender: &BroadcastSender<Arc<ReceivedTransaction<Types>>>,
+//     txns: Vec<Types::Transaction>,
+//     source: TransactionSource,
+//     max_txn_len: u64,
+// ) -> Vec<Result<Commitment<<Types as NodeType>::Transaction>, BuildError>> {
+//     HandleReceivedTxns::<Types>::new(tx_sender.clone(), txns, source, max_txn_len)
+//         .map(|res| res.map_err(Into::into))
+//         .collect()
+// }
+//
+// /// `HandleReceivedTxns` is a struct that is used to handle the processing of
+// /// the function [`handle_received_txns`]. In order to avoid the need to
+// /// double allocate a [`Vec`] from processing these entries, this struct exists
+// /// to be processed as an [`Iterator`] instead.
+// struct HandleReceivedTxns<Types: NodeType> {
+//     tx_sender: BroadcastSender<Arc<ReceivedTransaction<Types>>>,
+//     txns: Vec<Types::Transaction>,
+//     source: TransactionSource,
+//     max_txn_len: u64,
+//     offset: usize,
+//     txns_length: usize,
+//     time_in: Instant,
+// }
+//
+// impl<Types: NodeType> HandleReceivedTxns<Types> {
+//     fn new(
+//         tx_sender: BroadcastSender<Arc<ReceivedTransaction<Types>>>,
+//         txns: Vec<Types::Transaction>,
+//         source: TransactionSource,
+//         max_txn_len: u64,
+//     ) -> Self {
+//         let txns_length = txns.len();
+//
+//         Self {
+//             tx_sender,
+//             txns,
+//             source,
+//             max_txn_len,
+//             offset: 0,
+//             txns_length,
+//             time_in: Instant::now(),
+//         }
+//     }
+// }
+//
+// impl<Types: NodeType> Iterator for HandleReceivedTxns<Types>
+// where
+//     Types::Transaction: Transaction,
+// {
+//     type Item =
+//         Result<Commitment<<Types as NodeType>::Transaction>, HandleReceivedTxnsError<Types>>;
+//
+//     fn next(&mut self) -> Option<Self::Item> {
+//         if self.txns.is_empty() {
+//             return None;
+//         }
+//
+//         if self.offset >= self.txns_length {
+//             return None;
+//         }
+//
+//         let offset = self.offset;
+//         // increment the offset so we can ensure we're making progress
+//         self.offset += 1;
+//
+//         let tx = self.txns[offset].clone();
+//         let commit = tx.commit();
+//         // This is a rough estimate, but we don't have any other way to get real
+//         // encoded transaction length. Luckily, this being roughly proportional
+//         // to encoded length is enough, because we only use this value to estimate
+//         // our limitations on computing the VID in time.
+//         let min_block_size = tx.minimum_block_size();
+//         let max_txn_len = self.max_txn_len;
+//         if min_block_size > max_txn_len {
+//             tracing::warn!(%commit, %min_block_size, %max_txn_len, "Transaction too big");
+//             return Some(Err(HandleReceivedTxnsError::TransactionTooBig {
+//                 estimated_length: min_block_size,
+//                 max_txn_len: self.max_txn_len,
+//             }));
+//         }
+//
+//         let res = self
+//             .tx_sender
+//             .try_broadcast(Arc::new(ReceivedTransaction {
+//                 transaction: tx,
+//                 source: self.source.clone(),
+//                 commit,
+//                 time_in: self.time_in,
+//                 min_block_size,
+//             }))
+//             .inspect(|val| {
+//                 if let Some(evicted_txn) = val {
+//                     tracing::warn!(
+//                         "Overflow mode enabled, transaction {} evicted",
+//                         evicted_txn.commit
+//                     );
+//                 }
+//             })
+//             .map(|_| commit)
+//             .inspect_err(|err| {
+//                 tracing::warn!("Failed to broadcast txn with commit {:?}: {}", commit, err);
+//             })
+//             .map_err(HandleReceivedTxnsError::from);
+//
+//         Some(res)
+//     }
+//
+//     fn size_hint(&self) -> (usize, Option<usize>) {
+//         (
+//             self.txns_length - self.offset,
+//             Some(self.txns.capacity() - self.offset),
+//         )
+//     }
+// }
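+// // A standalone sketch (hypothetical `PerItem` type, not part of this crate)
+// // of the iterator-instead-of-Vec design used by `HandleReceivedTxns` above:
+// // yielding one `Result` per transaction lets the caller `collect()` exactly
+// // once, and advancing `offset` unconditionally means a failed item never
+// // stalls the ones behind it:
+// //
+// // struct PerItem<I> {
+// //     items: Vec<I>,
+// //     offset: usize,
+// // }
+// //
+// // impl<I: Clone> Iterator for PerItem<I> {
+// //     type Item = Result<I, String>;
+// //     fn next(&mut self) -> Option<Self::Item> {
+// //         let item = self.items.get(self.offset)?.clone();
+// //         self.offset += 1; // always make progress, success or failure
+// //         Some(Ok(item))
+// //     }
+// // }
+// //
+// // fn main() {
+// //     let out: Vec<_> = PerItem { items: vec![1u8, 2, 3], offset: 0 }.collect();
+// //     assert_eq!(out.len(), 3);
+// // }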
+// #[cfg(all(test, not(test)))]
+// mod test {
+//     use std::{sync::Arc, time::Duration};
+//
+//     use async_compatibility_layer::channel::unbounded;
+//     use async_lock::RwLock;
+//     use committable::Commitment;
+//     use futures::StreamExt;
+//     use hotshot::{
+//         traits::BlockPayload,
+//         types::{BLSPubKey, SignatureKey},
+//     };
+//     use hotshot_builder_api::v0_2::block_info::AvailableBlockInfo;
+//     use hotshot_example_types::{
+//         block_types::{TestBlockPayload, TestMetadata, TestTransaction},
+//         node_types::{TestTypes, TestVersions},
+//         state_types::{TestInstanceState, TestValidatedState},
+//     };
+//     use hotshot_types::{
+//         data::{DaProposal, Leaf, QuorumProposal, ViewNumber},
+//         message::Proposal,
+//         simple_certificate::QuorumCertificate,
+//         traits::{
+//             block_contents::{precompute_vid_commitment, vid_commitment},
+//             node_implementation::ConsensusTime,
+//             signature_key::BuilderSignatureKey,
+//         },
+//         utils::BuilderCommitment,
+//     };
+//     use marketplace_builder_shared::{
+//         block::{BlockId, BuilderStateId, ParentBlockReferences},
+//         testing::constants::{
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_NUM_NODES_IN_VID_COMPUTATION,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//         },
+//         utils::LegacyCommit,
+//     };
+//     use sha2::{Digest, Sha256};
+//
+//     use crate::{
+//         builder_state::{
+//             BuildBlockInfo, MessageType, RequestMessage, ResponseMessage, TransactionSource,
+//             TriggerStatus,
+//         },
+//         service::{BlockSizeLimits, HandleReceivedTxnsError},
+//     };
+//
+//     use super::{
+//         handle_da_event_implementation, handle_quorum_event_implementation, AvailableBlocksError,
+//         BlockInfo, ClaimBlockError, ClaimBlockHeaderInputError, GlobalState, HandleDaEventError,
+//         HandleQuorumEventError, HandleReceivedTxns, ProxyGlobalState,
+//     };
+//
+//     /// A constant `max_txn_len` to be used consistently across all of the tests.
+//     /// It is set to 1, as the current estimate for a `TestTransaction` is 1.
+//     const TEST_MAX_TX_LEN: u64 = 1;
+//
+//     // GlobalState Tests
+//
+//     // GlobalState::new Tests
+//
+//     /// This test checks that a [`GlobalState`] created from [`GlobalState::new`] has
+//     /// the appropriate values stored within it.
+//     #[async_std::test]
+//     async fn test_global_state_new() {
+//         let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//         let (tx_sender, _) = async_broadcast::broadcast(10);
+//         let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//         let state = GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(1),
+//             ViewNumber::new(2),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         );
+//
+//         assert_eq!(state.blocks.len(), 0, "The blocks LRU should be empty");
+//
+//         let builder_state_id = BuilderStateId {
+//             parent_commitment: parent_commit,
+//             parent_view: ViewNumber::new(1),
+//         };
+//
+//         // There should be a single entry within the spawned_builder_states,
+//         // and it should be the one that was just created.
+//         assert_eq!(
+//             state.spawned_builder_states.len(),
+//             1,
+//             "There should be a single entry in the spawned builder states hashmap"
+//         );
+//
+//         assert!(state.spawned_builder_states.contains_key(&builder_state_id), "The spawned builder states should contain an entry with the bootstrapped parameters passed into new");
+//
+//         assert!(!state.spawned_builder_states.contains_key(&BuilderStateId { parent_commitment: parent_commit, parent_view: ViewNumber::new(0) }), "The spawned builder states should not contain any other entry; in particular, it should not contain an entry with a different view number but the same parent commit");
+//
+//         // We can't compare the Senders directly
+//
+//         assert_eq!(
+//             state.last_garbage_collected_view_num,
+//             ViewNumber::new(2),
+//             "The last garbage collected view number should be the one passed into new"
+//         );
+//
+//         assert_eq!(
+//             state.builder_state_to_last_built_block.len(),
+//             0,
+//             "The builder state to last built block should be empty"
+//         );
+//
+//         assert_eq!(
+//             state.highest_view_num_builder_id, builder_state_id,
+//             "The highest view number builder id should be the bootstrapped builder state id"
+//         );
+//
+//         assert_eq!(
+//             state.block_size_limits.protocol_max_block_size, TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             "The protocol max block size should be the one passed into new"
+//         );
+//
+//         assert_eq!(
+//             state.block_size_limits.max_block_size, state.block_size_limits.protocol_max_block_size,
+//             "The max block size should be initialized to the protocol max block size"
+//         );
+//     }
+//
+//     // GlobalState::register_builder_state Tests
+//
+//     /// This test checks that the [`GlobalState::register_builder_state`] function
+//     /// will correctly register a new builder state, and that the highest view
+//     /// number builder id will be updated to the new builder state id.
+//     /// Additionally, it will check that the spawned builder states hashmap
+//     /// will contain the new builder state id.
+// #[async_std::test] +// async fn test_global_state_register_builder_state_different_states() { +// let (bootstrap_sender, _) = async_broadcast::broadcast(10); +// let (tx_sender, _) = async_broadcast::broadcast(10); +// let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let mut state = GlobalState::::new( +// bootstrap_sender, +// tx_sender, +// parent_commit, +// ViewNumber::new(0), +// ViewNumber::new(0), +// TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +// TEST_PROTOCOL_MAX_BLOCK_SIZE, +// TEST_NUM_NODES_IN_VID_COMPUTATION, +// ); +// +// { +// let (req_sender, _) = async_broadcast::broadcast(10); +// let builder_state_id = BuilderStateId { +// parent_commitment: parent_commit, +// parent_view: ViewNumber::new(5), +// }; +// +// state.register_builder_state( +// builder_state_id.clone(), +// ParentBlockReferences { +// view_number: ViewNumber::new(5), +// vid_commitment: parent_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// req_sender.clone(), +// ); +// +// assert_eq!( +// state.spawned_builder_states.len(), +// 2, +// "The spawned_builder_states should now have 2 elements in it" +// ); +// assert_eq!( +// state.highest_view_num_builder_id, builder_state_id, +// "The highest view number builder id should now be the one that was just registered" +// ); +// assert!( +// state.spawned_builder_states.contains_key(&builder_state_id), +// "The spawned builder states should contain the new builder state id" +// ); +// }; +// +// { +// let (req_sender, _) = async_broadcast::broadcast(10); +// let builder_state_id = BuilderStateId { +// parent_commitment: parent_commit, +// parent_view: ViewNumber::new(6), +// }; +// +// state.register_builder_state( +// builder_state_id.clone(), +// ParentBlockReferences { +// view_number: ViewNumber::new(6), +// vid_commitment: parent_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// req_sender.clone(), +// ); +// +// assert_eq!( +// state.spawned_builder_states.len(), +// 3, +// "The spawned_builder_states should now have 3 elements in it" +// ); +// assert_eq!( +// state.highest_view_num_builder_id, builder_state_id, +// "The highest view number builder id should now be the one that was just registered" +// ); +// assert!( +// state.spawned_builder_states.contains_key(&builder_state_id), +// "The spawned builder states should contain the new builder state id" +// ); +// }; +// } +// +// /// This test checks that the register_builder_state method will overwrite +// /// the previous sender in the `spawned_builder_states` hashmap if the same +// /// `BuilderStateId` is used to register a new sender. +// /// +// /// It also demonstrates that doing this will drop the previous sender, +// /// effectively closing it if it is the only reference to it. 
+// #[async_std::test] +// async fn test_global_state_register_builder_state_same_builder_state_id() { +// let (bootstrap_sender, _) = async_broadcast::broadcast(10); +// let (tx_sender, _) = async_broadcast::broadcast(10); +// let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let mut state = GlobalState::::new( +// bootstrap_sender, +// tx_sender, +// parent_commit, +// ViewNumber::new(0), +// ViewNumber::new(0), +// TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +// TEST_PROTOCOL_MAX_BLOCK_SIZE, +// TEST_NUM_NODES_IN_VID_COMPUTATION, +// ); +// +// let mut req_receiver_1 = { +// let (req_sender, req_receiver) = async_broadcast::broadcast(10); +// let builder_state_id = BuilderStateId { +// parent_commitment: parent_commit, +// parent_view: ViewNumber::new(5), +// }; +// +// state.register_builder_state( +// builder_state_id.clone(), +// ParentBlockReferences { +// view_number: ViewNumber::new(5), +// vid_commitment: parent_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// req_sender.clone(), +// ); +// +// assert_eq!( +// state.spawned_builder_states.len(), +// 2, +// "The spawned_builder_states should now have 2 elements in it" +// ); +// assert_eq!( +// state.highest_view_num_builder_id, builder_state_id, +// "The highest view number builder id should now be the one that was just registered" +// ); +// +// req_receiver +// }; +// +// let mut req_receiver_2 = { +// let (req_sender, req_receiver) = async_broadcast::broadcast(10); +// let builder_state_id = BuilderStateId { +// parent_commitment: parent_commit, +// parent_view: ViewNumber::new(5), +// }; +// +// // This is the same BuilderStateId as the previous one, so it should +// // replace the previous one. Which means that the previous one +// // may no longer be published to. +// state.register_builder_state( +// builder_state_id.clone(), +// ParentBlockReferences { +// view_number: ViewNumber::new(5), +// vid_commitment: parent_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// req_sender.clone(), +// ); +// +// assert_eq!( +// state.spawned_builder_states.len(), +// 2, +// "The spawned_builder_states should still have 2 elements in it" +// ); +// assert_eq!(state.highest_view_num_builder_id, builder_state_id, "The highest view number builder id should still be the one that was just registered"); +// +// req_receiver +// }; +// +// { +// let builder_state_id = BuilderStateId { +// parent_commitment: parent_commit, +// parent_view: ViewNumber::new(5), +// }; +// +// let req_id_and_sender = state.spawned_builder_states.get(&builder_state_id).unwrap(); +// let (response_sender, _) = unbounded(); +// +// assert!( +// req_id_and_sender +// .1 +// .broadcast(MessageType::RequestMessage(RequestMessage { +// state_id: builder_state_id, +// response_channel: response_sender, +// })) +// .await +// .is_ok(), +// "This should be able to send a Message through the sender" +// ); +// } +// +// // The first receiver should have been replaced, so we won't get any +// // results from it. 
+// +// assert!( +// req_receiver_1.recv().await.is_err(), +// "This first receiver should be closed" +// ); +// assert!( +// req_receiver_2.recv().await.is_ok(), +// "The second receiver should receive a message" +// ); +// } +// +// /// This test checks that the register_builder_state method will only +// /// update the highest_view_num_builder_id if the new [BuilderStateId] has +// /// a higher view number than the current highest_view_num_builder_id. +// #[async_std::test] +// async fn test_global_state_register_builder_state_decrementing_builder_state_ids() { +// let (bootstrap_sender, _) = async_broadcast::broadcast(10); +// let (tx_sender, _) = async_broadcast::broadcast(10); +// let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let mut state = GlobalState::::new( +// bootstrap_sender, +// tx_sender, +// parent_commit, +// ViewNumber::new(0), +// ViewNumber::new(0), +// TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +// TEST_PROTOCOL_MAX_BLOCK_SIZE, +// TEST_NUM_NODES_IN_VID_COMPUTATION, +// ); +// +// { +// let (req_sender, _) = async_broadcast::broadcast(10); +// let builder_state_id = BuilderStateId { +// parent_commitment: parent_commit, +// parent_view: ViewNumber::new(6), +// }; +// +// state.register_builder_state( +// builder_state_id.clone(), +// ParentBlockReferences { +// view_number: ViewNumber::new(6), +// vid_commitment: parent_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// req_sender.clone(), +// ); +// +// assert_eq!( +// state.spawned_builder_states.len(), +// 2, +// "The spawned_builder_states should now have 2 elements in it" +// ); +// assert_eq!( +// state.highest_view_num_builder_id, builder_state_id, +// "The highest view number builder id should now be the one that was just registered" +// ); +// assert!( +// state.spawned_builder_states.contains_key(&builder_state_id), +// "The spawned builder states should contain the new builder state id" +// ); +// }; +// +// { +// let (req_sender, _) = async_broadcast::broadcast(10); +// let builder_state_id = BuilderStateId { +// parent_commitment: parent_commit, +// parent_view: ViewNumber::new(5), +// }; +// +// state.register_builder_state( +// builder_state_id.clone(), +// ParentBlockReferences { +// view_number: ViewNumber::new(5), +// vid_commitment: parent_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// req_sender.clone(), +// ); +// +// assert_eq!( +// state.spawned_builder_states.len(), +// 3, +// "The spawned_builder_states should now have 3 elements in it" +// ); +// assert_eq!( +// state.highest_view_num_builder_id, +// BuilderStateId { +// parent_commitment: parent_commit, +// parent_view: ViewNumber::new(6) +// }, +// "The highest view number builder id should now be the one that was just registered" +// ); +// assert!( +// state.spawned_builder_states.contains_key(&builder_state_id), +// "The spawned builder states should contain the new builder state id" +// ); +// }; +// } +// +// // GlobalState::update_global_state Tests +// +// /// This test checks that the update_global_state method will correctly +// /// update the LRU blocks cache and the builder_state_to_last_built_block +// /// hashmap with values derived from the parameters passed into the method. +// /// +// /// The assumption behind this test is that the values being stored were +// /// not being stored previously. 
+//     #[async_std::test]
+//     async fn test_global_state_update_global_state_success() {
+//         let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//         let (tx_sender, _) = async_broadcast::broadcast(10);
+//         let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//         let mut state = GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         );
+//
+//         let new_parent_commit = vid_commitment(&[], 9);
+//         let new_view_num = ViewNumber::new(1);
+//         let builder_state_id = BuilderStateId {
+//             parent_commitment: new_parent_commit,
+//             parent_view: new_view_num,
+//         };
+//
+//         let builder_hash_1 = BuilderCommitment::from_bytes([1, 2, 3, 4]);
+//         let block_id = BlockId {
+//             hash: builder_hash_1,
+//             view: new_view_num,
+//         };
+//
+//         let (vid_trigger_sender, vid_trigger_receiver) =
+//             async_compatibility_layer::channel::oneshot();
+//         let (vid_sender, vid_receiver) = unbounded();
+//         let (block_payload, metadata) =
+//             <TestBlockPayload as BlockPayload<TestTypes>>::from_transactions(
+//                 vec![TestTransaction::new(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])],
+//                 &TestValidatedState::default(),
+//                 &TestInstanceState::default(),
+//             )
+//             .await
+//             .unwrap();
+//         let offered_fee = 64u64;
+//         let block_size = 64u64;
+//         let truncated = false;
+//
+//         let build_block_info = BuildBlockInfo {
+//             id: block_id.clone(),
+//             block_size,
+//             offered_fee,
+//             block_payload: block_payload.clone(),
+//             metadata,
+//             vid_trigger: vid_trigger_sender,
+//             vid_receiver,
+//             truncated,
+//         };
+//
+//         let builder_hash_2 = BuilderCommitment::from_bytes([2, 3, 4, 5]);
+//         let response_msg = ResponseMessage {
+//             builder_hash: builder_hash_2.clone(),
+//             block_size: 32,
+//             offered_fee: 128,
+//         };
+//
+//         // Now that every object is prepared and set up for storage, we can
+//         // test the `update_global_state` method.
+//
+//         // `update_global_state` has no return value, so we can only inspect
+//         // its "success" based on the mutation of the state object.
+//         state.update_global_state(builder_state_id.clone(), build_block_info, response_msg);
+//
+//         // two things should be adjusted by `update_global_state`:
+//         // - state.blocks
+//         // - state.builder_state_to_last_built_block
+//
+//         // start with blocks
+//
+//         assert_eq!(
+//             state.blocks.len(),
+//             1,
+//             "The blocks LRU should have a single entry"
+//         );
+//
+//         let retrieved_block_info = state.blocks.get(&block_id);
+//         assert!(
+//             retrieved_block_info.is_some(),
+//             "Retrieving the block id should result in valid block info data"
+//         );
+//
+//         let retrieved_block_info = retrieved_block_info.unwrap();
+//
+//         assert_eq!(
+//             retrieved_block_info.block_payload, block_payload,
+//             "The block payloads should match"
+//         );
+//         assert_eq!(
+//             retrieved_block_info.metadata, metadata,
+//             "The metadata should match"
+//         );
+//         assert_eq!(
+//             retrieved_block_info.offered_fee, offered_fee,
+//             "The offered fee should match"
+//         );
+//         assert_eq!(
+//             retrieved_block_info.truncated, truncated,
+//             "The truncated flag should match"
+//         );
+//
+//         {
+//             // This ensures that the vid_trigger that is stored is still the
+//             // same, or links to the vid_trigger_receiver that we submitted.
+//             let mut vid_trigger_write_lock_guard =
+//                 retrieved_block_info.vid_trigger.write_arc().await;
+//             if let Some(vid_trigger_sender) = vid_trigger_write_lock_guard.take() {
+//                 vid_trigger_sender.send(TriggerStatus::Start);
+//             }
+//
+//             match vid_trigger_receiver.recv().await {
+//                 Ok(TriggerStatus::Start) => {
+//                     // This is expected
+//                 }
+//                 _ => {
+//                     panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected");
+//                 }
+//             }
+//         }
+//
+//         {
+//             // This ensures that the vid_sender that is stored is still the
+//             // same, or links to the vid_receiver that we submitted.
+//             let (vid_commitment, vid_precompute) =
+//                 precompute_vid_commitment(&[1, 2, 3, 4, 5], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//             assert_eq!(
+//                 vid_sender
+//                     .send((vid_commitment, vid_precompute.clone()))
+//                     .await,
+//                 Ok(()),
+//                 "The vid_sender should be able to send the vid commitment and precompute"
+//             );
+//
+//             let mut vid_receiver_write_lock_guard =
+//                 retrieved_block_info.vid_receiver.write_arc().await;
+//
+//             // Get and Keep object
+//
+//             match vid_receiver_write_lock_guard.get().await {
+//                 Ok((received_vid_commitment, received_vid_precompute)) => {
+//                     assert_eq!(
+//                         received_vid_commitment, vid_commitment,
+//                         "The received vid commitment should match the expected vid commitment"
+//                     );
+//                     assert_eq!(
+//                         received_vid_precompute, vid_precompute,
+//                         "The received vid precompute should match the expected vid precompute"
+//                     );
+//                 }
+//                 _ => {
+//                     panic!("did not receive the expected vid commitment and precompute from vid_receiver_write_lock_guard");
+//                 }
+//             }
+//         }
+//
+//         // finish with builder_state_to_last_built_block
+//
+//         assert_eq!(
+//             state.builder_state_to_last_built_block.len(),
+//             1,
+//             "The builder state to last built block should have a single entry"
+//         );
+//
+//         let last_built_block = state
+//             .builder_state_to_last_built_block
+//             .get(&builder_state_id);
+//
+//         assert!(
+//             last_built_block.is_some(),
+//             "The last built block should be retrievable"
+//         );
+//
+//         let last_built_block = last_built_block.unwrap();
+//
+//         assert_eq!(
+//             last_built_block.builder_hash, builder_hash_2,
+//             "The last built block id should match the block id"
+//         );
+//
+//         assert_eq!(
+//             last_built_block.block_size, 32,
+//             "The last built block size should match the response message"
+//         );
+//
+//         assert_eq!(
+//             last_built_block.offered_fee, 128,
+//             "The last built block offered fee should match the response message"
+//         );
+//     }
+//
+//     /// This test demonstrates the replacement behavior of the
+//     /// `update_global_state` method.
+//     ///
+//     /// When given a `BuilderStateId` that already exists in the `blocks` LRU,
+//     /// and the `builder_state_to_last_built_block` hashmap, the method will
+//     /// replace the values in the `builder_state_to_last_built_block` hashmap,
+//     /// and it will also replace the entry in the `blocks` LRU.
+//     #[async_std::test]
+//     async fn test_global_state_update_global_state_replacement() {
+//         let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//         let (tx_sender, _) = async_broadcast::broadcast(10);
+//         let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//         let mut state = GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         );
+//
+//         let new_parent_commit = vid_commitment(&[], 9);
+//         let new_view_num = ViewNumber::new(1);
+//         let builder_state_id = BuilderStateId {
+//             parent_commitment: new_parent_commit,
+//             parent_view: new_view_num,
+//         };
+//
+//         let builder_hash = BuilderCommitment::from_bytes([1, 2, 3, 4]);
+//         let block_id_1 = BlockId {
+//             hash: builder_hash.clone(),
+//             view: new_view_num,
+//         };
+//         let (vid_trigger_sender_1, vid_trigger_receiver_1) =
+//             async_compatibility_layer::channel::oneshot();
+//         let (vid_sender_1, vid_receiver_1) = unbounded();
+//         let (block_payload_1, metadata_1) =
+//             <TestBlockPayload as BlockPayload<TestTypes>>::from_transactions(
+//                 vec![TestTransaction::new(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])],
+//                 &TestValidatedState::default(),
+//                 &TestInstanceState::default(),
+//             )
+//             .await
+//             .unwrap();
+//         let offered_fee_1 = 64u64;
+//         let block_size_1 = 64u64;
+//         let truncated_1 = false;
+//         let build_block_info_1 = BuildBlockInfo {
+//             id: block_id_1.clone(),
+//             block_size: block_size_1,
+//             offered_fee: offered_fee_1,
+//             block_payload: block_payload_1.clone(),
+//             metadata: metadata_1,
+//             vid_trigger: vid_trigger_sender_1,
+//             vid_receiver: vid_receiver_1,
+//             truncated: truncated_1,
+//         };
+//         let response_msg_1 = ResponseMessage {
+//             builder_hash: builder_hash.clone(),
+//             block_size: block_size_1,
+//             offered_fee: offered_fee_1,
+//         };
+//
+//         // Now that every object is prepared and set up for storage, we can
+//         // test the `update_global_state` method.
+//
+//         // `update_global_state` has no return value, so we can only inspect
+//         // its "success" based on the mutation of the state object.
+//         state.update_global_state(builder_state_id.clone(), build_block_info_1, response_msg_1);
+//
+//         // We're going to perform another `update_global_state` call with the
+//         // same builder_state_id, but with different values for the block info
+//         // and response message. This should highlight that the values get
+//         // replaced in this update.
+// +// let block_id_2 = BlockId { +// hash: builder_hash.clone(), +// view: new_view_num, +// }; +// let (vid_trigger_sender_2, vid_trigger_receiver_2) = +// async_compatibility_layer::channel::oneshot(); +// let (vid_sender_2, vid_receiver_2) = unbounded(); +// let (block_payload_2, metadata_2) = +// >::from_transactions( +// vec![TestTransaction::new(vec![2, 3, 4, 5, 6, 7, 8, 9, 10, 11])], +// &TestValidatedState::default(), +// &TestInstanceState::default(), +// ) +// .await +// .unwrap(); +// let offered_fee_2 = 16u64; +// let block_size_2 = 32u64; +// let truncated_2 = true; +// let build_block_info_2 = BuildBlockInfo { +// id: block_id_2.clone(), +// block_size: block_size_2, +// offered_fee: offered_fee_2, +// block_payload: block_payload_2.clone(), +// metadata: metadata_2, +// vid_trigger: vid_trigger_sender_2, +// vid_receiver: vid_receiver_2, +// truncated: truncated_2, +// }; +// let response_msg_2: ResponseMessage = ResponseMessage { +// builder_hash: builder_hash.clone(), +// block_size: block_size_2, +// offered_fee: offered_fee_2, +// }; +// +// // two things should be adjusted by `update_global_state`: +// // When given the same build_state_ids. +// state.update_global_state(builder_state_id.clone(), build_block_info_2, response_msg_2); +// +// // start with blocks +// +// assert_eq!( +// state.blocks.len(), +// 1, +// "The blocks LRU should have a single entry" +// ); +// +// let retrieved_block_info = state.blocks.get(&block_id_2); +// assert!( +// retrieved_block_info.is_some(), +// "Retrieval of the block id should result is a valid block info data" +// ); +// +// let retrieved_block_info = retrieved_block_info.unwrap(); +// +// assert_eq!( +// retrieved_block_info.block_payload, block_payload_2, +// "The block payloads should match" +// ); +// assert_ne!( +// retrieved_block_info.block_payload, block_payload_1, +// "The block payloads should not match" +// ); +// assert_eq!( +// retrieved_block_info.metadata, metadata_2, +// "The metadata should match" +// ); +// assert_eq!( +// retrieved_block_info.metadata, metadata_1, +// "The metadata should match" +// ); +// // TestMetadata will always match +// +// assert_eq!( +// retrieved_block_info.offered_fee, offered_fee_2, +// "The offered fee should match" +// ); +// assert_ne!( +// retrieved_block_info.offered_fee, offered_fee_1, +// "The offered fee should not match" +// ); +// assert_eq!( +// retrieved_block_info.truncated, truncated_2, +// "The truncated flag should match" +// ); +// assert_ne!( +// retrieved_block_info.truncated, truncated_1, +// "The truncated flag should not match" +// ); +// +// { +// // This ensures that the vid_trigger that is stored is still the +// // same, or links to the vid_trigger_receiver that we submitted. +// let mut vid_trigger_write_lock_guard = +// retrieved_block_info.vid_trigger.write_arc().await; +// if let Some(vid_trigger_sender) = vid_trigger_write_lock_guard.take() { +// vid_trigger_sender.send(TriggerStatus::Start); +// } +// +// match vid_trigger_receiver_2.recv().await { +// Ok(TriggerStatus::Start) => { +// // This is expected +// } +// _ => { +// panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); +// } +// } +// +// assert!( +// vid_trigger_receiver_1.recv().await.is_err(), +// "This should not receive anything from vid_trigger_receiver_1" +// ); +// } +// +// { +// // This ensures that the vid_sender that is stored is still the +// // same, or links to the vid_receiver that we submitted. 
+// let (vid_commitment, vid_precompute) = +// precompute_vid_commitment(&[1, 2, 3, 4, 5], TEST_NUM_NODES_IN_VID_COMPUTATION); +// assert_eq!( +// vid_sender_2 +// .send((vid_commitment, vid_precompute.clone())) +// .await, +// Ok(()), +// "The vid_sender should be able to send the vid commitment and precompute" +// ); +// +// assert!( +// vid_sender_1 +// .send((vid_commitment, vid_precompute.clone())) +// .await +// .is_err(), +// "The vid_sender should not be able to send the vid commitment and precompute" +// ); +// +// let mut vid_receiver_write_lock_guard = +// retrieved_block_info.vid_receiver.write_arc().await; +// +// // Get and Keep object +// +// match vid_receiver_write_lock_guard.get().await { +// Ok((received_vid_commitment, received_vid_precompute)) => { +// assert_eq!( +// received_vid_commitment, vid_commitment, +// "The received vid commitment should match the expected vid commitment" +// ); +// assert_eq!( +// received_vid_precompute, vid_precompute, +// "The received vid precompute should match the expected vid precompute" +// ); +// } +// _ => { +// panic!("did not receive the expected vid commitment and precompute from vid_receiver_write_lock_guard"); +// } +// } +// } +// +// // finish with builder_state_to_last_built_block +// +// assert_eq!( +// state.builder_state_to_last_built_block.len(), +// 1, +// "The builder state to last built block should have a single entry" +// ); +// +// let last_built_block = state +// .builder_state_to_last_built_block +// .get(&builder_state_id); +// +// assert!( +// last_built_block.is_some(), +// "The last built block should be retrievable" +// ); +// +// let last_built_block = last_built_block.unwrap(); +// +// assert_eq!( +// last_built_block.builder_hash, builder_hash, +// "The last built block id should match the block id" +// ); +// +// assert_eq!( +// last_built_block.block_size, block_size_2, +// "The last built block size should match the response message" +// ); +// assert_ne!( +// last_built_block.block_size, block_size_1, +// "The last built block size should not match the previous block size" +// ); +// +// assert_eq!( +// last_built_block.offered_fee, offered_fee_2, +// "The last built block offered fee should match the response message" +// ); +// assert_ne!( +// last_built_block.offered_fee, offered_fee_1, +// "The last built block offered fee should not match the previous block offered fee" +// ); +// } +// +// // GlobalState::remove_handles Tests +// +// /// This test checks to ensure that remove_handles will only consider +// /// views up to what is known to have been stored. As a result it will +// /// indicate that is has only targeted to the highest view number that it +// /// is aware of. +// #[async_std::test] +// async fn test_global_state_remove_handles_prune_up_to_latest() { +// let (bootstrap_sender, _) = async_broadcast::broadcast(10); +// let (tx_sender, _) = async_broadcast::broadcast(10); +// let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let mut state = GlobalState::::new( +// bootstrap_sender, +// tx_sender, +// parent_commit, +// ViewNumber::new(0), +// ViewNumber::new(0), +// TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +// TEST_PROTOCOL_MAX_BLOCK_SIZE, +// TEST_NUM_NODES_IN_VID_COMPUTATION, +// ); +// +// // We register a few builder states. 
+// for i in 1..=10 { +// let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let view = ViewNumber::new(i as u64); +// +// state.register_builder_state( +// BuilderStateId { +// parent_commitment: vid_commit, +// parent_view: view, +// }, +// ParentBlockReferences { +// view_number: view, +// vid_commitment: vid_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// async_broadcast::broadcast(10).0, +// ); +// } +// +// assert_eq!( +// state.spawned_builder_states.len(), +// 11, +// "The spawned_builder_states should have the expected number of entries", +// ); +// +// assert_eq!( +// state.remove_handles(ViewNumber::new(100)), +// ViewNumber::new(10), +// "It should only be able to prune up to what has been stored" +// ); +// +// assert_eq!( +// state.spawned_builder_states.len(), +// 1, +// "The spawned_builder_states should only have a single entry in it" +// ); +// +// let builder_state_id = BuilderStateId { +// parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), +// parent_view: ViewNumber::new(10), +// }; +// assert_eq!( +// state.highest_view_num_builder_id, builder_state_id, +// "The highest view number builder id should be the one that was just registered" +// ); +// +// assert_eq!( +// state.last_garbage_collected_view_num, +// ViewNumber::new(9), +// "The last garbage collected view number should match expected value" +// ); +// +// assert!( +// state.spawned_builder_states.contains_key(&BuilderStateId { +// parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), +// parent_view: ViewNumber::new(10), +// }), +// "The spawned builder states should contain the builder state id: {builder_state_id}" +// ); +// } +// +// /// This test checks that the remove_handles doesn't ensure that the +// /// `last_garbage_collected_view_num` is strictly increasing. By first +// /// removing a higher view number, followed by a smaller view number +// /// (with the highest_view_num_builder_id having a view greater than or +// /// equal to both targets) we can demonstrate this property. +// /// +// /// Furthermore this demonstrates that by supplying any view number to +// /// remove_handles that is less than `last_garbage_collected_view_num` will +// /// result in `last_garbage_collected_view_num` being updated to the given +// /// value minus 1, without regard for it actually removing / cleaning +// /// anything, or whether it is moving backwards in view numbers. +// /// +// /// If we were to account for the view numbers actually being cleaned up, +// /// we could still trigger this behavior be re-adding the builder states +// /// with a view number that precedes the last garbage collected view number, +// /// then removing them would trigger the same behavior. +// #[async_std::test] +// async fn test_global_state_remove_handles_can_reduce_last_garbage_collected_view_num_simple() { +// let (bootstrap_sender, _) = async_broadcast::broadcast(10); +// let (tx_sender, _) = async_broadcast::broadcast(10); +// let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let mut state = GlobalState::::new( +// bootstrap_sender, +// tx_sender, +// parent_commit, +// ViewNumber::new(0), +// ViewNumber::new(0), +// TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +// TEST_PROTOCOL_MAX_BLOCK_SIZE, +// TEST_NUM_NODES_IN_VID_COMPUTATION, +// ); +// +// // We register a few builder states. 
+// for i in 1..=10 { +// let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let view = ViewNumber::new(i as u64); +// +// state.register_builder_state( +// BuilderStateId { +// parent_commitment: vid_commit, +// parent_view: view, +// }, +// ParentBlockReferences { +// view_number: view, +// vid_commitment: vid_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// async_broadcast::broadcast(10).0, +// ); +// } +// +// assert_eq!( +// state.highest_view_num_builder_id, +// BuilderStateId { +// parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), +// parent_view: ViewNumber::new(10), +// }, +// "The highest view number builder id should be the one that was just registered" +// ); +// +// assert_eq!( +// state.remove_handles(ViewNumber::new(10)), +// ViewNumber::new(10), +// "It should remove what has been stored" +// ); +// +// assert_eq!( +// state.last_garbage_collected_view_num, +// ViewNumber::new(9), +// "The last garbage collected view number should match expected value" +// ); +// +// assert_eq!( +// state.remove_handles(ViewNumber::new(5)), +// ViewNumber::new(5), +// "If we only remove up to view 5, then only entries preceding view 5 should be removed" +// ); +// +// // The last garbage collected view has gone down as a result of our +// // new remove_handles target, demonstrating that this number isn't +// // strictly increasing in value. +// assert_eq!( +// state.last_garbage_collected_view_num, +// ViewNumber::new(4), +// "The last garbage collected view number should match expected value", +// ); +// } +// +// /// This test checks that the remove_handles doesn't ensure that the +// /// `last_garbage_collected_view_num` is strictly increasing. It is very +// /// similar to `test_global_state_remove_handles_can_reduce_last_garbage_collected_view_num_simple` +// /// but differs in that it re-adds the removed builder states, just in case +// /// the previous test's behavior is erroneous and fixed by ensuring that we +// /// only consider removed view numbers. +// #[async_std::test] +// async fn test_global_state_remove_handles_can_reduce_last_garbage_collected_view_num_strict() { +// let (bootstrap_sender, _) = async_broadcast::broadcast(10); +// let (tx_sender, _) = async_broadcast::broadcast(10); +// let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let mut state = GlobalState::::new( +// bootstrap_sender, +// tx_sender, +// parent_commit, +// ViewNumber::new(0), +// ViewNumber::new(0), +// TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +// TEST_PROTOCOL_MAX_BLOCK_SIZE, +// TEST_NUM_NODES_IN_VID_COMPUTATION, +// ); +// +// // We register a few builder states. 
+// for i in 1..=10 { +// let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let view = ViewNumber::new(i as u64); +// +// state.register_builder_state( +// BuilderStateId { +// parent_commitment: vid_commit, +// parent_view: view, +// }, +// ParentBlockReferences { +// view_number: view, +// vid_commitment: vid_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// async_broadcast::broadcast(10).0, +// ); +// } +// +// assert_eq!( +// state.highest_view_num_builder_id, +// BuilderStateId { +// parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), +// parent_view: ViewNumber::new(10), +// }, +// "The highest view number builder id should be the one that was just registered" +// ); +// +// assert_eq!( +// state.remove_handles(ViewNumber::new(10)), +// ViewNumber::new(10), +// "It should remove what has been stored" +// ); +// +// assert_eq!( +// state.last_garbage_collected_view_num, +// ViewNumber::new(9), +// "The last garbage collected view number should match expected value" +// ); +// +// // We re-add these removed builder_state_ids +// for i in 1..10 { +// let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let view = ViewNumber::new(i as u64); +// +// state.register_builder_state( +// BuilderStateId { +// parent_commitment: vid_commit, +// parent_view: view, +// }, +// ParentBlockReferences { +// view_number: view, +// vid_commitment: vid_commit, +// leaf_commit: Commitment::from_raw([0; 32]), +// builder_commitment: BuilderCommitment::from_bytes([]), +// }, +// async_broadcast::broadcast(10).0, +// ); +// } +// +// assert_eq!( +// state.remove_handles(ViewNumber::new(5)), +// ViewNumber::new(5), +// "If we only remove up to view 5, then only entries preceding view 5 should be removed" +// ); +// +// // The last garbage collected view has gone down as a result of our +// // new remove_handles target, demonstrating that this number isn't +// // strictly increasing in value. +// assert_eq!( +// state.last_garbage_collected_view_num, +// ViewNumber::new(4), +// "The last garbage collected view number should match expected value", +// ); +// } +// +// /// This test checks that the remove_handles methods will correctly remove +// /// The expected number of builder states from the spawned_builder_states +// /// hashmap. It does this by specifically controlling the number of builder +// /// states that are registered, and then removing a subset of them. It +// /// verifies the absence of the entries that should have been removed, and +// /// the presence of the entries that should have been kept. +// #[async_std::test] +// async fn test_global_state_remove_handles_expected() { +// let (bootstrap_sender, _) = async_broadcast::broadcast(10); +// let (tx_sender, _) = async_broadcast::broadcast(10); +// let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); +// let mut state = GlobalState::::new( +// bootstrap_sender, +// tx_sender, +// parent_commit, +// ViewNumber::new(0), +// ViewNumber::new(0), +// TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +// TEST_PROTOCOL_MAX_BLOCK_SIZE, +// TEST_NUM_NODES_IN_VID_COMPUTATION, +// ); +// +// // We register a few builder states. 
+//         for i in 1..=10 {
+//             let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//             let view = ViewNumber::new(i as u64);
+//
+//             state.register_builder_state(
+//                 BuilderStateId {
+//                     parent_commitment: vid_commit,
+//                     parent_view: view,
+//                 },
+//                 ParentBlockReferences {
+//                     view_number: view,
+//                     vid_commitment: vid_commit,
+//                     leaf_commit: Commitment::from_raw([0; 32]),
+//                     builder_commitment: BuilderCommitment::from_bytes([]),
+//                 },
+//                 async_broadcast::broadcast(10).0,
+//             );
+//         }
+//
+//         assert_eq!(
+//             state.spawned_builder_states.len(),
+//             11,
+//             "The spawned_builder_states should have 11 elements in it"
+//         );
+//
+//         assert_eq!(
+//             state.highest_view_num_builder_id,
+//             BuilderStateId {
+//                 parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION),
+//                 parent_view: ViewNumber::new(10),
+//             },
+//             "The highest view number builder id should be the one that was just registered"
+//         );
+//
+//         assert_eq!(
+//             state.last_garbage_collected_view_num,
+//             ViewNumber::new(0),
+//             "The last garbage collected view number should be what was passed in"
+//         );
+//
+//         // Now we want to clean up some previous builder states to ensure that we
+//         // remove the appropriate targets.
+//
+//         // This should remove the builder states preceding view number 5
+//         assert_eq!(
+//             state.remove_handles(ViewNumber::new(5)),
+//             ViewNumber::new(5),
+//             "The last garbage collected view number should match expected value"
+//         );
+//
+//         // There should be 11 - 5 entries remaining
+//         assert_eq!(
+//             state.spawned_builder_states.len(),
+//             6,
+//             "The spawned_builder_states should have 6 elements in it"
+//         );
+//
+//         for i in 0..5 {
+//             let builder_state_id = BuilderStateId {
+//                 parent_commitment: vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION),
+//                 parent_view: ViewNumber::new(i as u64),
+//             };
+//             assert!(
+//                 !state.spawned_builder_states.contains_key(&builder_state_id),
+//                 "The spawned builder states should not contain the builder state id: {builder_state_id}"
+//             );
+//         }
+//
+//         for i in 5..=10 {
+//             let builder_state_id = BuilderStateId {
+//                 parent_commitment: vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION),
+//                 parent_view: ViewNumber::new(i as u64),
+//             };
+//             assert!(
+//                 state.spawned_builder_states.contains_key(&builder_state_id),
+//                 "The spawned builder states should contain the builder state id: {builder_state_id}"
+//             );
+//         }
+//     }
+//
+//     // Get Available Blocks Tests
+//
+//     /// This test checks that the error `AvailableBlocksError::NoBlocksAvailable`
+//     /// is returned when no blocks are available.
+//     ///
+//     /// To trigger this condition, we simply submit a request to the
+//     /// implementation of get_available_blocks, and we do not provide any
+//     /// information for the block view number requested. As a result, the
+//     /// implementation will ultimately time out and return an error that
+//     /// indicates that no blocks were available.
+// #[async_std::test] +// async fn test_get_available_blocks_error_no_blocks_available() { +// let (bootstrap_sender, _) = async_broadcast::broadcast(10); +// let (tx_sender, _) = async_broadcast::broadcast(10); +// let (builder_public_key, builder_private_key) = +// ::generated_from_seed_indexed([0; 32], 0); +// let (leader_public_key, leader_private_key) = +// ::generated_from_seed_indexed([0; 32], 1); +// let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); +// +// let state = ProxyGlobalState::::new( +// Arc::new(RwLock::new(GlobalState::::new( +// bootstrap_sender, +// tx_sender, +// parent_commit, +// ViewNumber::new(0), +// ViewNumber::new(0), +// TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +// TEST_PROTOCOL_MAX_BLOCK_SIZE, +// TEST_NUM_NODES_IN_VID_COMPUTATION, +// ))), +// (builder_public_key, builder_private_key), +// Duration::from_millis(100), +// ); +// +// // leader_private_key +// let signature = BLSPubKey::sign(&leader_private_key, parent_commit.as_ref()).unwrap(); +// +// // This *should* just time out +// let result = state +// .available_blocks_implementation( +// &vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION), +// 1, +// leader_public_key, +// &signature, +// ) +// .await; +// +// match result { +// Err(AvailableBlocksError::NoBlocksAvailable) => { +// // This is what we expect. +// // This message *should* indicate that no blocks were available. +// } +// Err(err) => { +// panic!("Unexpected error: {:?}", err); +// } +// Ok(_) => { +// panic!("Expected an error, but got a result"); +// } +// } +// } +// +// /// This test checks that the error `AvailableBlocksError::SignatureValidationFailed` +// /// is returned when the signature is invalid. +// /// +// /// To trigger this condition, we simply submit a request to the +// /// implementation of get_available_blocks, but we sign the request with +// /// the builder's private key instead of the leader's private key. Since +// /// these keys do not match, this will result in a signature verification +// /// error. +// #[async_std::test] +// async fn test_get_available_blocks_error_invalid_signature() { +// let (bootstrap_sender, _) = async_broadcast::broadcast(10); +// let (tx_sender, _) = async_broadcast::broadcast(10); +// let (builder_public_key, builder_private_key) = +// ::generated_from_seed_indexed([0; 32], 0); +// let (leader_public_key, _leader_private_key) = +// ::generated_from_seed_indexed([0; 32], 1); +// let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); +// +// let state = ProxyGlobalState::::new( +// Arc::new(RwLock::new(GlobalState::::new( +// bootstrap_sender, +// tx_sender, +// parent_commit, +// ViewNumber::new(0), +// ViewNumber::new(0), +// TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +// TEST_PROTOCOL_MAX_BLOCK_SIZE, +// TEST_NUM_NODES_IN_VID_COMPUTATION, +// ))), +// (builder_public_key, builder_private_key.clone()), +// Duration::from_millis(100), +// ); +// +// // leader_private_key +// let signature = BLSPubKey::sign(&builder_private_key, parent_commit.as_ref()).unwrap(); +// +// // This *should* just time out +// let result = state +// .available_blocks_implementation( +// &vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION), +// 1, +// leader_public_key, +// &signature, +// ) +// .await; +// +// match result { +// Err(AvailableBlocksError::SignatureValidationFailed) => { +// // This is what we expect. +// // This message *should* indicate that the signature passed +// // did not match the given public key. 
+//             }
+//             Err(err) => {
+//                 panic!("Unexpected error: {:?}", err);
+//             }
+//             Ok(_) => {
+//                 panic!("Expected an error, but got a result");
+//             }
+//         }
+//     }
+//
+//     /// This test checks that the error `AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided`
+//     /// is returned when the requested view number has already been garbage
+//     /// collected.
+//     ///
+//     /// To trigger this condition, we initialize the GlobalState with a
+//     /// garbage collected view number that is higher than the view that will
+//     /// be requested.
+//     #[async_std::test]
+//     async fn test_get_available_blocks_error_requesting_previous_view_number() {
+//         let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//         let (tx_sender, _) = async_broadcast::broadcast(10);
+//         let (builder_public_key, builder_private_key) =
+//             <BLSPubKey as BuilderSignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//         let (leader_public_key, leader_private_key) =
+//             <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//         let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//         let state = ProxyGlobalState::<TestTypes>::new(
+//             Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//                 bootstrap_sender,
+//                 tx_sender,
+//                 parent_commit,
+//                 ViewNumber::new(0),
+//                 ViewNumber::new(2),
+//                 TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//                 TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//                 TEST_NUM_NODES_IN_VID_COMPUTATION,
+//             ))),
+//             (builder_public_key, builder_private_key),
+//             Duration::from_millis(100),
+//         );
+//
+//         // leader_private_key
+//         let signature = BLSPubKey::sign(&leader_private_key, parent_commit.as_ref()).unwrap();
+//
+//         // This should return promptly with an error, as view 1 precedes the
+//         // last garbage collected view (2) passed in above.
+//         let result = state
+//             .available_blocks_implementation(
+//                 &vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION),
+//                 1,
+//                 leader_public_key,
+//                 &signature,
+//             )
+//             .await;
+//
+//         match result {
+//             Err(AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided) => {
+//                 // This is what we expect.
+//                 // This error *should* indicate that the requested view has
+//                 // already been decided / garbage collected.
+//             }
+//             Err(err) => {
+//                 panic!("Unexpected error: {:?}", err);
+//             }
+//             Ok(_) => {
+//                 panic!("Expected an error, but got a result");
+//             }
+//         }
+//     }
+//
+//     /// This test checks that the error `AvailableBlocksError::GetChannelForMatchingBuilderError`
+//     /// is returned when attempting to retrieve a view that is not stored within the state, and
+//     /// the highest view is also no longer stored within the state.
+//     ///
+//     /// To trigger this condition, we initialize the GlobalState with an initial
+//     /// state, and then we mutate the state to record the wrong latest state id.
+//     /// When interacting with `GlobalState` via `register_builder_state` and
+//     /// `remove_handles`, this error doesn't seem immediately possible.
+//     #[async_std::test]
+//     async fn test_get_available_blocks_error_get_channel_for_matching_builder() {
+//         let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//         let (tx_sender, _) = async_broadcast::broadcast(10);
+//         let (builder_public_key, builder_private_key) =
+//             <BLSPubKey as BuilderSignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//         let (leader_public_key, leader_private_key) =
+//             <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//         let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//         let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//             Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//                 bootstrap_sender,
+//                 tx_sender,
+//                 parent_commit,
+//                 ViewNumber::new(4),
+//                 ViewNumber::new(4),
+//                 TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//                 TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//                 TEST_NUM_NODES_IN_VID_COMPUTATION,
+//             ))),
+//             (builder_public_key, builder_private_key.clone()),
+//             Duration::from_secs(1),
+//         ));
+//
+//         {
+//             let mut write_locked_global_state = state.global_state.write_arc().await;
+//             write_locked_global_state.highest_view_num_builder_id = BuilderStateId {
+//                 parent_commitment: parent_commit,
+//                 parent_view: ViewNumber::new(5),
+//             };
+//         }
+//
+//         // Because the recorded highest view builder state id has no matching
+//         // entry in spawned_builder_states, the request below should fail to
+//         // find a channel for a matching builder.
+//
+//         let signature = BLSPubKey::sign(&leader_private_key, parent_commit.as_ref()).unwrap();
+//         let result = state
+//             .available_blocks_implementation(&parent_commit, 6, leader_public_key, &signature)
+//             .await;
+//         match result {
+//             Err(AvailableBlocksError::GetChannelForMatchingBuilderError(_)) => {
+//                 // This is what we expect.
+//                 // This error *should* indicate that no channel for a matching
+//                 // builder state could be found.
+//             }
+//             Err(err) => {
+//                 panic!("Unexpected error: {:?}", err);
+//             }
+//             Ok(_) => {
+//                 panic!("Expected an error, but got a result");
+//             }
+//         }
+//     }
+//
+//     // We have two error cases for `available_blocks_implementation` that we
+//     // cannot seem to trigger directly, due to the nature of the
+//     // implementation.
+//     //
+//     // The first is ChannelUnexpectedlyClosed, which doesn't appear to be
+//     // producible, as the unbounded channel doesn't seem to be closable.
+//     //
+//     // The second is SigningBlockFailed, which doesn't seem to be producible
+//     // with a valid private key, and it's not clear how to create an invalid
+//     // private key.
+//
+//     /// This test checks that a call to `available_blocks_implementation` returns
+//     /// a successful response when the function is called before blocks are
+//     /// made available.
+// /// This test checks that a call to `available_blocks_implementation` returns
+// /// a successful response when the function is called before blocks are
+// /// made available.
+// #[async_std::test]
+// async fn test_get_available_blocks_requested_before_blocks_available() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let cloned_parent_commit = parent_commit;
+//     let cloned_state = state.clone();
+//     let cloned_leader_private_key = leader_private_key.clone();
+//
+//     // We want to trigger a request for the available blocks before we make
+//     // the block data available.
+//     let get_available_blocks_handle = async_std::task::spawn(async move {
+//         let signature =
+//             BLSPubKey::sign(&cloned_leader_private_key, cloned_parent_commit.as_ref()).unwrap();
+//         cloned_state
+//             .available_blocks_implementation(
+//                 &cloned_parent_commit,
+//                 1,
+//                 leader_public_key,
+//                 &signature,
+//             )
+//             .await
+//     });
+//
+//     // Now we want to make the block data available to the state.
+//     let expected_builder_state_id = BuilderStateId {
+//         parent_commitment: parent_commit,
+//         parent_view: ViewNumber::new(1),
+//     };
+//
+//     let mut response_receiver = {
+//         // We only want to keep this write lock for the time needed, and
+//         // no more.
+//         let mut write_locked_global_state = state.global_state.write_arc().await;
+//
+//         // We insert a sender so that the next time this state id is requested,
+//         // it will be available to send data back.
+//         let (response_sender, response_receiver) = async_broadcast::broadcast(10);
+//         write_locked_global_state.register_builder_state(
+//             expected_builder_state_id.clone(),
+//             ParentBlockReferences {
+//                 view_number: expected_builder_state_id.parent_view,
+//                 vid_commitment: expected_builder_state_id.parent_commitment,
+//                 leaf_commit: Commitment::from_raw([0; 32]),
+//                 builder_commitment: BuilderCommitment::from_bytes([]),
+//             },
+//             response_sender,
+//         );
+//
+//         response_receiver
+//     };
+//
+//     // As a result, we **should** be receiving a request for the available
+//     // blocks with our expected state id on the receiver, along with a channel
+//     // to send the response back to the caller.
+//     let response_channel = match response_receiver.next().await {
+//         None => {
+//             panic!("Expected a request for available blocks, but didn't get one");
+//         }
+//         Some(MessageType::RequestMessage(req_msg)) => {
+//             assert_eq!(req_msg.state_id, expected_builder_state_id);
+//             req_msg.response_channel
+//         }
+//         Some(message) => {
+//             panic!(
+//                 "Expected a request for available blocks, but got a different message: {:?}",
+//                 message
+//             );
+//         }
+//     };
+//
+//     // We want to send a ResponseMessage to the channel
+//     let expected_response = ResponseMessage {
+//         block_size: 9,
+//         offered_fee: 7,
+//         builder_hash: BuilderCommitment::from_bytes([1, 2, 3, 4, 5]),
+//     };
+//
+//     assert!(
+//         response_channel
+//             .send(expected_response.clone())
+//             .await
+//             .is_ok(),
+//         "failed to send ResponseMessage"
+//     );
+//
+//     let result = get_available_blocks_handle.await;
+//     match result {
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(result) => {
+//             assert_eq!(
+//                 result,
+//                 vec![AvailableBlockInfo {
+//                     block_hash: expected_response.builder_hash.clone(),
+//                     block_size: expected_response.block_size,
+//                     offered_fee: expected_response.offered_fee,
+//                     signature: <BLSPubKey as BuilderSignatureKey>::sign_block_info(
+//                         &builder_private_key,
+//                         expected_response.block_size,
+//                         expected_response.offered_fee,
+//                         &expected_response.builder_hash,
+//                     )
+//                     .unwrap(),
+//                     sender: builder_public_key,
+//                     _phantom: Default::default(),
+//                 }],
+//                 "get_available_blocks response matches expectation"
+//             );
+//         }
+//     }
+// }
+//
+// /// This test checks that a call to `available_blocks_implementation` returns
+// /// a successful response when the function is called after blocks are
+// /// made available.
+// #[async_std::test]
+// async fn test_get_available_blocks_requested_after_blocks_available() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let cloned_parent_commit = parent_commit;
+//     let cloned_state = state.clone();
+//     let cloned_leader_private_key = leader_private_key.clone();
+//
+//     // Now we want to make the block data available to the state.
+//     let expected_builder_state_id = BuilderStateId {
+//         parent_commitment: parent_commit,
+//         parent_view: ViewNumber::new(1),
+//     };
+//
+//     let mut response_receiver = {
+//         // We only want to keep this write lock for the time needed, and
+//         // no more.
+//         let mut write_locked_global_state = state.global_state.write_arc().await;
+//
+//         // We insert a sender so that the next time this state id is requested,
+//         // it will be available to send data back.
+//         let (response_sender, response_receiver) = async_broadcast::broadcast(10);
+//         write_locked_global_state.register_builder_state(
+//             expected_builder_state_id.clone(),
+//             ParentBlockReferences {
+//                 view_number: expected_builder_state_id.parent_view,
+//                 vid_commitment: expected_builder_state_id.parent_commitment,
+//                 leaf_commit: Commitment::from_raw([0; 32]),
+//                 builder_commitment: BuilderCommitment::from_bytes([]),
+//             },
+//             response_sender,
+//         );
+//
+//         response_receiver
+//     };
+//
+//     // Now that the block data is available, we trigger the request for the
+//     // available blocks.
+//     let get_available_blocks_handle = async_std::task::spawn(async move {
+//         let signature =
+//             BLSPubKey::sign(&cloned_leader_private_key, cloned_parent_commit.as_ref()).unwrap();
+//         cloned_state
+//             .available_blocks_implementation(
+//                 &cloned_parent_commit,
+//                 1,
+//                 leader_public_key,
+//                 &signature,
+//             )
+//             .await
+//     });
+//
+//     // As a result, we **should** be receiving a request for the available
+//     // blocks with our expected state id on the receiver, along with a channel
+//     // to send the response back to the caller.
+//     let response_channel = match response_receiver.next().await {
+//         None => {
+//             panic!("Expected a request for available blocks, but didn't get one");
+//         }
+//         Some(MessageType::RequestMessage(req_msg)) => {
+//             assert_eq!(req_msg.state_id, expected_builder_state_id);
+//             req_msg.response_channel
+//         }
+//         Some(message) => {
+//             panic!(
+//                 "Expected a request for available blocks, but got a different message: {:?}",
+//                 message
+//             );
+//         }
+//     };
+//
+//     // We want to send a ResponseMessage to the channel
+//     let expected_response = ResponseMessage {
+//         block_size: 9,
+//         offered_fee: 7,
+//         builder_hash: BuilderCommitment::from_bytes([1, 2, 3, 4, 5]),
+//     };
+//
+//     assert!(
+//         response_channel
+//             .send(expected_response.clone())
+//             .await
+//             .is_ok(),
+//         "failed to send ResponseMessage"
+//     );
+//
+//     let result = get_available_blocks_handle.await;
+//     match result {
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(result) => {
+//             assert_eq!(
+//                 result,
+//                 vec![AvailableBlockInfo {
+//                     block_hash: expected_response.builder_hash.clone(),
+//                     block_size: expected_response.block_size,
+//                     offered_fee: expected_response.offered_fee,
+//                     signature: <BLSPubKey as BuilderSignatureKey>::sign_block_info(
+//                         &builder_private_key,
+//                         expected_response.block_size,
+//                         expected_response.offered_fee,
+//                         &expected_response.builder_hash,
+//                     )
+//                     .unwrap(),
+//                     sender: builder_public_key,
+//                     _phantom: Default::default(),
+//                 }],
+//                 "get_available_blocks response matches expectation"
+//             );
+//         }
+//     }
+// }
+//
+// // Claim Block Tests
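+//
+// // NOTE: the signature pre-check exercised by the claim tests below amounts
+// // to roughly the following (a sketch assuming hotshot-types'
+// // `SignatureKey::validate`; not a verbatim excerpt of the implementation):
+// //
+// //     fn leader_signed_commitment(
+// //         leader: &BLSPubKey,
+// //         signature: &<BLSPubKey as SignatureKey>::PureAssembledSignatureType,
+// //         commitment: &BuilderCommitment,
+// //     ) -> bool {
+// //         // Verification is against the raw bytes of the builder commitment.
+// //         leader.validate(signature, commitment.as_ref())
+// //     }
+//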
+// /// This test checks that the error `ClaimBlockError::SignatureValidationFailed`
+// /// is returned when the signature is invalid.
+// ///
+// /// To trigger this condition, we simply submit a request to the
+// /// implementation of claim_block, but we sign the request with
+// /// the builder's private key instead of the leader's private key. Since
+// /// these keys do not match, this results in a signature verification
+// /// error.
+// #[async_std::test]
+// async fn test_claim_block_error_signature_validation_failed() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, _leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let commitment = BuilderCommitment::from_bytes([0; 256]);
+//
+//     let signature = BLSPubKey::sign(&builder_private_key, commitment.as_ref()).unwrap();
+//     let result = state
+//         .claim_block_implementation(&commitment, 1, leader_public_key, &signature)
+//         .await;
+//
+//     match result {
+//         Err(ClaimBlockError::SignatureValidationFailed) => {
+//             // This is what we expect.
+//             // This message *should* indicate that the signature passed
+//             // did not match the given public key.
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(_) => {
+//             panic!("Expected an error, but got a result");
+//         }
+//     }
+// }
+//
+// /// This test checks that the error `ClaimBlockError::BlockDataNotFound`
+// /// is returned when the block data is not found.
+// ///
+// /// To trigger this condition, we simply submit a request to the
+// /// implementation of claim_block, but we do not provide any information
+// /// for the block data requested. As a result, the implementation will
+// /// ultimately time out and return an error that indicates that the block
+// /// data was not found.
+// #[async_std::test]
+// async fn test_claim_block_error_block_data_not_found() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let commitment = BuilderCommitment::from_bytes([0; 256]);
+//
+//     let signature = BLSPubKey::sign(&leader_private_key, commitment.as_ref()).unwrap();
+//     let result = state
+//         .claim_block_implementation(&commitment, 1, leader_public_key, &signature)
+//         .await;
+//
+//     match result {
+//         Err(ClaimBlockError::BlockDataNotFound) => {
+//             // This is what we expect.
+//             // This message *should* indicate that the requested block data
+//             // was not found.
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(_) => {
+//             panic!("Expected an error, but got a result");
+//         }
+//     }
+// }
+//
+// /// This test checks that the function completes successfully.
+// #[async_std::test]
+// async fn test_claim_block_success() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let commitment = BuilderCommitment::from_bytes([0; 256]);
+//     let cloned_commitment = commitment.clone();
+//     let cloned_state = state.clone();
+//
+//     let vid_trigger_receiver = {
+//         let mut global_state_write_lock = state.global_state.write_arc().await;
+//         let block_id = BlockId {
+//             hash: commitment,
+//             view: ViewNumber::new(1),
+//         };
+//
+//         let payload = TestBlockPayload {
+//             transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])],
+//         };
+//
+//         let (vid_trigger_sender, vid_trigger_receiver) =
+//             async_compatibility_layer::channel::oneshot();
+//         let (_, vid_receiver) = unbounded();
+//
+//         global_state_write_lock.blocks.put(
+//             block_id,
+//             BlockInfo {
+//                 block_payload: payload,
+//                 metadata: TestMetadata {
+//                     num_transactions: 1,
+//                 },
+//                 vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))),
+//                 vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait(
+//                     vid_receiver,
+//                 ))),
+//                 offered_fee: 100,
+//                 truncated: false,
+//             },
+//         );
+//
+//         vid_trigger_receiver
+//     };
+//
+//     let claim_block_join_handle = async_std::task::spawn(async move {
+//         let signature =
+//             BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap();
+//         cloned_state
+//             .claim_block_implementation(&cloned_commitment, 1, leader_public_key, &signature)
+//             .await
+//     });
+//
+//     // This should be the started event
+//     match vid_trigger_receiver.recv().await {
+//         Ok(TriggerStatus::Start) => {
+//             // This is what we expect.
+//         }
+//         _ => {
+//             panic!("Expected a TriggerStatus::Start event");
+//         }
+//     }
+//
+//     let result = claim_block_join_handle.await;
+//
+//     match result {
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(_) => {
+//             // This is expected
+//         }
+//     }
+// }
+//
+// // Claim Block Header Input Tests
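+//
+// // NOTE: the handshake the tests below drive by hand: `claim_block` fires
+// // `TriggerStatus::Start` on the block's `vid_trigger` channel to start VID
+// // computation, while `claim_block_header_input` waits on the block's
+// // `vid_receiver` for the (commitment, precompute) pair, e.g. the value
+// // produced by `precompute_vid_commitment(&payload_bytes, num_storage_nodes)`.
+//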
+// /// This test checks that the error `ClaimBlockHeaderInputError::SignatureValidationFailed`
+// /// is returned when the signature is invalid.
+// ///
+// /// To trigger this condition, we simply submit a request to the
+// /// implementation of claim_block_header_input, but we sign the request with
+// /// the builder's private key instead of the leader's private key. Since
+// /// these keys do not match, this results in a signature verification
+// /// error.
+// #[async_std::test]
+// async fn test_claim_block_header_input_error_signature_verification_failed() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, _leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let commitment = BuilderCommitment::from_bytes([0; 256]);
+//
+//     let signature = BLSPubKey::sign(&builder_private_key, commitment.as_ref()).unwrap();
+//
+//     let result = state
+//         .claim_block_header_input_implementation(&commitment, 1, leader_public_key, &signature)
+//         .await;
+//
+//     match result {
+//         Err(ClaimBlockHeaderInputError::SignatureValidationFailed) => {
+//             // This is what we expect.
+//             // This message *should* indicate that the signature passed
+//             // did not match the given public key.
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(_) => {
+//             panic!("Expected an error, but got a result");
+//         }
+//     }
+// }
+//
+// /// This test checks that the error `ClaimBlockHeaderInputError::BlockHeaderNotFound`
+// /// is returned when the block header is not found.
+// ///
+// /// To trigger this condition, we simply submit a request to the
+// /// implementation of claim_block_header_input, but we do not provide any
+// /// information for the block header requested. As a result, the
+// /// implementation will ultimately time out and return an error that
+// /// indicates that the block header was not found.
+// #[async_std::test]
+// async fn test_claim_block_header_input_error_block_header_not_found() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let commitment = BuilderCommitment::from_bytes([0; 256]);
+//
+//     let signature = BLSPubKey::sign(&leader_private_key, commitment.as_ref()).unwrap();
+//
+//     let result = state
+//         .claim_block_header_input_implementation(&commitment, 1, leader_public_key, &signature)
+//         .await;
+//
+//     match result {
+//         Err(ClaimBlockHeaderInputError::BlockHeaderNotFound) => {
+//             // This is what we expect.
+//             // This message *should* indicate that the requested block header
+//             // was not found.
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(_) => {
+//             panic!("Expected an error, but got a result");
+//         }
+//     }
+// }
+//
+// /// This test checks that the error `ClaimBlockHeaderInputError::CouldNotGetVidInTime`
+// /// is returned when the VID is not received in time.
+// ///
+// /// To trigger this condition, we simply submit a request to the
+// /// implementation of claim_block_header_input, but we never send a VID. As a
+// /// result, the implementation will ultimately time out and return an error
+// /// that indicates that the VID was not received in time.
+// ///
+// /// At least that's what it should do. At the moment, this results in a
+// /// deadlock due to attempting to acquire the `write_arc` twice.
+// #[async_std::test]
+// async fn test_claim_block_header_input_error_could_not_get_vid_in_time() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let commitment = BuilderCommitment::from_bytes([0; 256]);
+//     let cloned_commitment = commitment.clone();
+//     let cloned_state = state.clone();
+//
+//     let _vid_sender = {
+//         let mut global_state_write_lock = state.global_state.write_arc().await;
+//         let block_id = BlockId {
+//             hash: commitment,
+//             view: ViewNumber::new(1),
+//         };
+//
+//         let payload = TestBlockPayload {
+//             transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])],
+//         };
+//
+//         let (vid_trigger_sender, _) = async_compatibility_layer::channel::oneshot();
+//         let (vid_sender, vid_receiver) = unbounded();
+//
+//         global_state_write_lock.blocks.put(
+//             block_id,
+//             BlockInfo {
+//                 block_payload: payload,
+//                 metadata: TestMetadata {
+//                     num_transactions: 1,
+//                 },
+//                 vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))),
+//                 vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait(
+//                     vid_receiver,
+//                 ))),
+//                 offered_fee: 100,
+//                 truncated: false,
+//             },
+//         );
+//
+//         vid_sender
+//     };
+//
+//     let claim_block_header_input_join_handle = async_std::task::spawn(async move {
+//         let signature =
+//             BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap();
+//         cloned_state
+//             .claim_block_header_input_implementation(
+//                 &cloned_commitment,
+//                 1,
+//                 leader_public_key,
+//                 &signature,
+//             )
+//             .await
+//     });
+//
+//     let result = claim_block_header_input_join_handle.await;
+//
+//     match result {
+//         Err(ClaimBlockHeaderInputError::CouldNotGetVidInTime) => {
+//             // This is what we expect.
+//             // This message *should* indicate that the VID was not received
+//             // in time.
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(_) => {
+//             panic!("Expected an error, but got a result");
+//         }
+//     }
+// }
+//
+// /// This test checks that the error `ClaimBlockHeaderInputError::WaitAndKeepGetError`
+// /// is returned when the VID sender has been dropped before a result is sent.
+// ///
+// /// To trigger this condition, we simply submit a request to the
+// /// implementation of claim_block_header_input, but we close the VID receiver
+// /// channel's sender.
+// #[async_std::test]
+// async fn test_claim_block_header_input_error_keep_and_wait_get_error() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let commitment = BuilderCommitment::from_bytes([0; 256]);
+//     let cloned_commitment = commitment.clone();
+//     let cloned_state = state.clone();
+//
+//     {
+//         let mut global_state_write_lock = state.global_state.write_arc().await;
+//         let block_id = BlockId {
+//             hash: commitment,
+//             view: ViewNumber::new(1),
+//         };
+//
+//         let payload = TestBlockPayload {
+//             transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])],
+//         };
+//
+//         let (vid_trigger_sender, _) = async_compatibility_layer::channel::oneshot();
+//         let (_, vid_receiver) = unbounded();
+//
+//         global_state_write_lock.blocks.put(
+//             block_id,
+//             BlockInfo {
+//                 block_payload: payload,
+//                 metadata: TestMetadata {
+//                     num_transactions: 1,
+//                 },
+//                 vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))),
+//                 vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait(
+//                     vid_receiver,
+//                 ))),
+//                 offered_fee: 100,
+//                 truncated: false,
+//             },
+//         );
+//     };
+//
+//     let claim_block_header_input_join_handle = async_std::task::spawn(async move {
+//         let signature =
+//             BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap();
+//         cloned_state
+//             .claim_block_header_input_implementation(
+//                 &cloned_commitment,
+//                 1,
+//                 leader_public_key,
+//                 &signature,
+//             )
+//             .await
+//     });
+//
+//     let result = claim_block_header_input_join_handle.await;
+//
+//     match result {
+//         Err(ClaimBlockHeaderInputError::WaitAndKeepGetError(_)) => {
+//             // This is what we expect.
+//             // This message *should* indicate that the VID sender was closed
+//             // before a result was sent.
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(_) => {
+//             panic!("Expected an error, but got a result");
+//         }
+//     }
+// }
+//
+// /// This test checks that a successful response is returned when the VID is
+// /// received in time.
+// #[async_std::test]
+// async fn test_claim_block_header_input_success() {
+//     let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+//     let (tx_sender, _) = async_broadcast::broadcast(10);
+//     let (builder_public_key, builder_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (leader_public_key, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+//
+//     let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+//         Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//             bootstrap_sender,
+//             tx_sender,
+//             parent_commit,
+//             ViewNumber::new(0),
+//             ViewNumber::new(0),
+//             TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//             TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//             TEST_NUM_NODES_IN_VID_COMPUTATION,
+//         ))),
+//         (builder_public_key, builder_private_key.clone()),
+//         Duration::from_secs(1),
+//     ));
+//
+//     let commitment = BuilderCommitment::from_bytes([0; 256]);
+//     let cloned_commitment = commitment.clone();
+//     let cloned_state = state.clone();
+//
+//     let vid_sender = {
+//         let mut global_state_write_lock = state.global_state.write_arc().await;
+//         let block_id = BlockId {
+//             hash: commitment,
+//             view: ViewNumber::new(1),
+//         };
+//
+//         let payload = TestBlockPayload {
+//             transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])],
+//         };
+//
+//         let (vid_trigger_sender, _) = async_compatibility_layer::channel::oneshot();
+//         let (vid_sender, vid_receiver) = unbounded();
+//
+//         global_state_write_lock.blocks.put(
+//             block_id,
+//             BlockInfo {
+//                 block_payload: payload,
+//                 metadata: TestMetadata {
+//                     num_transactions: 1,
+//                 },
+//                 vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))),
+//                 vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait(
+//                     vid_receiver,
+//                 ))),
+//                 offered_fee: 100,
+//                 truncated: false,
+//             },
+//         );
+//
+//         vid_sender
+//     };
+//
+//     let claim_block_header_input_join_handle = async_std::task::spawn(async move {
+//         let signature =
+//             BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap();
+//         cloned_state
+//             .claim_block_header_input_implementation(
+//                 &cloned_commitment,
+//                 1,
+//                 leader_public_key,
+//                 &signature,
+//             )
+//             .await
+//     });
+//
+//     vid_sender
+//         .send(precompute_vid_commitment(&[1, 2, 3, 4], 2))
+//         .await
+//         .unwrap();
+//
+//     let result = claim_block_header_input_join_handle.await;
+//
+//     match result {
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//         Ok(_) => {
+//             // This is expected.
+//         }
+//     }
+// }
+//
+// // handle_da_event Tests
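+//
+// // NOTE (sketch): the happy path these tests exercise. Assuming hotshot-types'
+// // `SignatureKey::validate`, `handle_da_event_implementation` checks the
+// // sender's signature over the Sha256 digest of the encoded transactions
+// // before broadcasting, roughly:
+// //
+// //     let hash = Sha256::digest(&proposal.data.encoded_transactions);
+// //     if !sender_public_key.validate(&proposal.signature, &hash) {
+// //         return Err(HandleDaEventError::SignatureValidationFailed);
+// //     }
+// //     // A dropped receiver surfaces here as HandleDaEventError::BroadcastFailed.
+// //     da_channel_sender.broadcast(message).await?;
+//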
+// /// This test checks that the error [HandleDaEventError::SignatureValidationFailed]
+// /// is returned under the right conditions of invoking
+// /// [handle_da_event_implementation].
+// ///
+// /// To trigger this error, we simply need to ensure that the signature
+// /// provided to the [Proposal] does not match the public key of the sender.
+// ///
+// /// Additionally, the public keys passed for both the leader and the sender
+// /// need to match each other.
+// #[async_std::test]
+// async fn test_handle_da_event_implementation_error_signature_validation_failed() {
+//     let (sender_public_key, _) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (_, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let (da_channel_sender, _) = async_broadcast::broadcast(10);
+//     let view_number = ViewNumber::new(10);
+//
+//     let da_proposal = DaProposal::<TestTypes> {
+//         encoded_transactions: Arc::new([1, 2, 3, 4, 5, 6]),
+//         metadata: TestMetadata {
+//             num_transactions: 1,
+//         }, // arbitrary
+//         view_number,
+//     };
+//
+//     let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions);
+//     let signature =
+//         <BLSPubKey as SignatureKey>::sign(&leader_private_key, &encoded_txns_hash).unwrap();
+//
+//     let signed_da_proposal = Arc::new(Proposal {
+//         data: da_proposal,
+//         signature,
+//         _pd: Default::default(),
+//     });
+//
+//     let result = handle_da_event_implementation(
+//         &da_channel_sender,
+//         signed_da_proposal.clone(),
+//         sender_public_key,
+//     )
+//     .await;
+//
+//     match result {
+//         Err(HandleDaEventError::SignatureValidationFailed) => {
+//             // This is expected.
+//         }
+//         Ok(_) => {
+//             panic!("expected an error, but received a successful attempt instead")
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//     }
+// }
+//
+// /// This test checks that the error [HandleDaEventError::BroadcastFailed]
+// /// is returned under the right conditions of invoking
+// /// [handle_da_event_implementation].
+// ///
+// /// To trigger this error, we simply need to ensure that the broadcast
+// /// channel receiver has been closed / dropped before the attempt to
+// /// send on the broadcast sender is performed.
+// #[async_std::test]
+// async fn test_handle_da_event_implementation_error_broadcast_failed() {
+//     let (sender_public_key, sender_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let da_channel_sender = {
+//         let (da_channel_sender, _) = async_broadcast::broadcast(10);
+//         da_channel_sender
+//     };
+//
+//     let view_number = ViewNumber::new(10);
+//
+//     let da_proposal = DaProposal::<TestTypes> {
+//         encoded_transactions: Arc::new([1, 2, 3, 4, 5, 6]),
+//         metadata: TestMetadata {
+//             num_transactions: 1,
+//         }, // arbitrary
+//         view_number,
+//     };
+//
+//     let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions);
+//     let signature =
+//         <BLSPubKey as SignatureKey>::sign(&sender_private_key, &encoded_txns_hash).unwrap();
+//
+//     let signed_da_proposal = Arc::new(Proposal {
+//         data: da_proposal,
+//         signature,
+//         _pd: Default::default(),
+//     });
+//
+//     let result = handle_da_event_implementation(
+//         &da_channel_sender,
+//         signed_da_proposal.clone(),
+//         sender_public_key,
+//     )
+//     .await;
+//
+//     match result {
+//         Err(HandleDaEventError::BroadcastFailed(_)) => {
+//             // This error is expected
+//         }
+//         Ok(_) => {
+//             panic!("Expected an error, but got a result");
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//     }
+// }
+//
+// /// This test checks the expected successful behavior of the
+// /// [handle_da_event_implementation] function.
+// #[async_std::test]
+// async fn test_handle_da_event_implementation_success() {
+//     let (sender_public_key, sender_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (da_channel_sender, da_channel_receiver) = async_broadcast::broadcast(10);
+//     let view_number = ViewNumber::new(10);
+//
+//     let da_proposal = DaProposal::<TestTypes> {
+//         encoded_transactions: Arc::new([1, 2, 3, 4, 5, 6]),
+//         metadata: TestMetadata {
+//             num_transactions: 1,
+//         }, // arbitrary
+//         view_number,
+//     };
+//
+//     let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions);
+//     let signature =
+//         <BLSPubKey as SignatureKey>::sign(&sender_private_key, &encoded_txns_hash).unwrap();
+//
+//     let signed_da_proposal = Arc::new(Proposal {
+//         data: da_proposal,
+//         signature,
+//         _pd: Default::default(),
+//     });
+//
+//     let result = handle_da_event_implementation(
+//         &da_channel_sender,
+//         signed_da_proposal.clone(),
+//         sender_public_key,
+//     )
+//     .await;
+//
+//     match result {
+//         Ok(_) => {
+//             // This is expected.
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//     }
+//
+//     let mut da_channel_receiver = da_channel_receiver;
+//     match da_channel_receiver.next().await {
+//         Some(MessageType::DaProposalMessage(da_proposal_message)) => {
+//             assert_eq!(da_proposal_message.proposal, signed_da_proposal);
+//         }
+//         _ => {
+//             panic!("Expected a DaProposalMessage, but got something else");
+//         }
+//     }
+// }
+//
+// // handle_quorum_event Tests
+//
+// /// This test checks that the error [HandleQuorumEventError::SignatureValidationFailed]
+// /// is returned under the right conditions of invoking
+// /// [handle_quorum_event_implementation].
+// ///
+// /// To trigger this error, we simply need to ensure that the signature
+// /// provided to the [Proposal] does not match the public key of the sender.
+// ///
+// /// Additionally, the public keys passed for both the leader and the sender
+// /// need to match each other.
+// #[async_std::test]
+// async fn test_handle_quorum_event_error_signature_validation_failed() {
+//     let (sender_public_key, _) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (_, leader_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+//     let (quorum_channel_sender, _) = async_broadcast::broadcast(10);
+//     let view_number = ViewNumber::new(10);
+//
+//     let quorum_proposal = {
+//         let leaf = Leaf::<TestTypes>::genesis(
+//             &TestValidatedState::default(),
+//             &TestInstanceState::default(),
+//         )
+//         .await;
+//
+//         QuorumProposal::<TestTypes> {
+//             block_header: leaf.block_header().clone(),
+//             view_number,
+//             justify_qc: QuorumCertificate::genesis::<TestVersions>(
+//                 &TestValidatedState::default(),
+//                 &TestInstanceState::default(),
+//             )
+//             .await,
+//             upgrade_certificate: None,
+//             proposal_certificate: None,
+//         }
+//     };
+//
+//     let leaf = Leaf::from_quorum_proposal(&quorum_proposal);
+//
+//     let signature =
+//         <BLSPubKey as SignatureKey>::sign(&leader_private_key, leaf.legacy_commit().as_ref())
+//             .unwrap();
+//
+//     let signed_quorum_proposal = Arc::new(Proposal {
+//         data: quorum_proposal,
+//         signature,
+//         _pd: Default::default(),
+//     });
+//
+//     let result = handle_quorum_event_implementation(
+//         &quorum_channel_sender,
+//         signed_quorum_proposal.clone(),
+//         sender_public_key,
+//     )
+//     .await;
+//
+//     match result {
+//         Err(HandleQuorumEventError::SignatureValidationFailed) => {
+//             // This is expected.
+//         }
+//         Ok(_) => {
+//             panic!("expected an error, but received a successful attempt instead");
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//     }
+// }
+//
+// /// This test checks that the error [HandleQuorumEventError::BroadcastFailed]
+// /// is returned under the right conditions of invoking
+// /// [handle_quorum_event_implementation].
+// ///
+// /// To trigger this error, we simply need to ensure that the broadcast
+// /// channel receiver has been closed / dropped before the attempt to
+// /// send on the broadcast sender is performed.
+// #[async_std::test]
+// async fn test_handle_quorum_event_error_broadcast_failed() {
+//     let (sender_public_key, sender_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let quorum_channel_sender = {
+//         let (quorum_channel_sender, _) = async_broadcast::broadcast(10);
+//         quorum_channel_sender
+//     };
+//
+//     let view_number = ViewNumber::new(10);
+//
+//     let quorum_proposal = {
+//         let leaf = Leaf::<TestTypes>::genesis(
+//             &TestValidatedState::default(),
+//             &TestInstanceState::default(),
+//         )
+//         .await;
+//
+//         QuorumProposal::<TestTypes> {
+//             block_header: leaf.block_header().clone(),
+//             view_number,
+//             justify_qc: QuorumCertificate::genesis::<TestVersions>(
+//                 &TestValidatedState::default(),
+//                 &TestInstanceState::default(),
+//             )
+//             .await,
+//             upgrade_certificate: None,
+//             proposal_certificate: None,
+//         }
+//     };
+//
+//     let leaf = Leaf::from_quorum_proposal(&quorum_proposal);
+//
+//     let signature =
+//         <BLSPubKey as SignatureKey>::sign(&sender_private_key, leaf.legacy_commit().as_ref())
+//             .unwrap();
+//
+//     let signed_quorum_proposal = Arc::new(Proposal {
+//         data: quorum_proposal,
+//         signature,
+//         _pd: Default::default(),
+//     });
+//
+//     let result = handle_quorum_event_implementation(
+//         &quorum_channel_sender,
+//         signed_quorum_proposal.clone(),
+//         sender_public_key,
+//     )
+//     .await;
+//
+//     match result {
+//         Err(HandleQuorumEventError::BroadcastFailed(_)) => {
+//             // This is expected.
+//         }
+//         Ok(_) => {
+//             panic!("Expected an error, but got a result");
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//     }
+// }
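+//
+// // NOTE: unlike the DA path, the quorum signature is checked against the
+// // commitment of the proposed leaf: the tests rebuild it with
+// // `Leaf::from_quorum_proposal(&quorum_proposal)` and sign
+// // `leaf.legacy_commit().as_ref()`, so a signature from any other key fails
+// // validation with `HandleQuorumEventError::SignatureValidationFailed`.
+//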
+// /// This test checks to ensure that [handle_quorum_event_implementation]
+// /// completes successfully as expected when the correct conditions are met.
+// #[async_std::test]
+// async fn test_handle_quorum_event_success() {
+//     let (sender_public_key, sender_private_key) =
+//         <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+//     let (quorum_channel_sender, quorum_channel_receiver) = async_broadcast::broadcast(10);
+//     let view_number = ViewNumber::new(10);
+//
+//     let quorum_proposal = {
+//         let leaf = Leaf::<TestTypes>::genesis(
+//             &TestValidatedState::default(),
+//             &TestInstanceState::default(),
+//         )
+//         .await;
+//
+//         QuorumProposal::<TestTypes> {
+//             block_header: leaf.block_header().clone(),
+//             view_number,
+//             justify_qc: QuorumCertificate::genesis::<TestVersions>(
+//                 &TestValidatedState::default(),
+//                 &TestInstanceState::default(),
+//             )
+//             .await,
+//             upgrade_certificate: None,
+//             proposal_certificate: None,
+//         }
+//     };
+//
+//     let leaf = Leaf::from_quorum_proposal(&quorum_proposal);
+//
+//     let signature =
+//         <BLSPubKey as SignatureKey>::sign(&sender_private_key, leaf.legacy_commit().as_ref())
+//             .unwrap();
+//
+//     let signed_quorum_proposal = Arc::new(Proposal {
+//         data: quorum_proposal,
+//         signature,
+//         _pd: Default::default(),
+//     });
+//
+//     let result = handle_quorum_event_implementation(
+//         &quorum_channel_sender,
+//         signed_quorum_proposal.clone(),
+//         sender_public_key,
+//     )
+//     .await;
+//
+//     match result {
+//         Ok(_) => {
+//             // This is expected.
+//         }
+//         Err(err) => {
+//             panic!("Unexpected error: {:?}", err);
+//         }
+//     }
+//
+//     let mut quorum_channel_receiver = quorum_channel_receiver;
+//     match quorum_channel_receiver.next().await {
+//         Some(MessageType::QuorumProposalMessage(quorum_proposal_message)) => {
+//             assert_eq!(quorum_proposal_message.proposal, signed_quorum_proposal);
+//         }
+//         _ => {
+//             panic!("Expected a QuorumProposalMessage, but got something else");
+//         }
+//     }
+// }
+//
+// // HandleReceivedTxns Tests
+//
+// /// This test checks that the error [HandleReceivedTxnsError::TooManyTransactions]
+// /// is returned when the conditions are met.
+// ///
+// /// To trigger this error we simply provide a broadcast channel with a
+// /// buffer smaller than the number of transactions we are attempting to
+// /// send through it.
+// #[async_std::test]
+// async fn test_handle_received_txns_error_too_many_transactions() {
+//     let (tx_sender, tx_receiver) = async_broadcast::broadcast(2);
+//     let num_transactions = 5;
+//     let mut txns = Vec::with_capacity(num_transactions);
+//     for index in 0..num_transactions {
+//         txns.push(TestTransaction::new(vec![index as u8]));
+//     }
+//     let txns = txns;
+//
+//     {
+//         let mut handle_received_txns_iter = HandleReceivedTxns::<TestTypes>::new(
+//             tx_sender,
+//             txns.clone(),
+//             TransactionSource::HotShot,
+//             TEST_MAX_TX_LEN,
+//         );
+//
+//         assert!(handle_received_txns_iter.next().is_some());
+//         assert!(handle_received_txns_iter.next().is_some());
+//         match handle_received_txns_iter.next() {
+//             Some(Err(HandleReceivedTxnsError::TooManyTransactions)) => {
+//                 // This is expected,
+//             }
+//             Some(Err(err)) => {
+//                 panic!("Unexpected error: {:?}", err);
+//             }
+//             Some(Ok(_)) => {
+//                 panic!("Expected an error, but got a result");
+//             }
+//             None => {
+//                 panic!("Expected an error, but got a result");
+//             }
+//         }
+//     }
+//
+//     let mut tx_receiver = tx_receiver;
+//     assert!(tx_receiver.next().await.is_some());
+//     assert!(tx_receiver.next().await.is_some());
+//     assert!(tx_receiver.next().await.is_none());
+// }
+//
+// /// This test checks that the error [HandleReceivedTxnsError::TransactionTooBig]
+// /// is returned when the conditions are met.
+// ///
+// /// To trigger this error we simply provide a [TestTransaction] whose size
+// /// exceeds the maximum transaction length we pass to [HandleReceivedTxns].
+// #[async_std::test]
+// async fn test_handle_received_txns_error_transaction_too_big() {
+//     let (tx_sender, tx_receiver) = async_broadcast::broadcast(10);
+//     let num_transactions = 2;
+//     let mut txns = Vec::with_capacity(num_transactions + 1);
+//     for index in 0..num_transactions {
+//         txns.push(TestTransaction::new(vec![index as u8]));
+//     }
+//     txns.push(TestTransaction::new(vec![0; 256]));
+//     let txns = txns;
+//
+//     {
+//         let mut handle_received_txns_iter = HandleReceivedTxns::<TestTypes>::new(
+//             tx_sender,
+//             txns.clone(),
+//             TransactionSource::HotShot,
+//             TEST_MAX_TX_LEN,
+//         );
+//
+//         assert!(handle_received_txns_iter.next().is_some());
+//         assert!(handle_received_txns_iter.next().is_some());
+//         match handle_received_txns_iter.next() {
+//             Some(Err(HandleReceivedTxnsError::TransactionTooBig {
+//                 estimated_length,
+//                 max_txn_len,
+//             })) => {
+//                 // This is expected,
+//                 assert!(estimated_length >= 256);
+//                 assert_eq!(max_txn_len, TEST_MAX_TX_LEN);
+//             }
+//             Some(Err(err)) => {
+//                 panic!("Unexpected error: {:?}", err);
+//             }
+//             Some(Ok(_)) => {
+//                 panic!("Expected an error, but got a result");
+//             }
+//             None => {
+//                 panic!("Expected an error, but got a result");
+//             }
+//         }
+//     }
+//
+//     let mut tx_receiver = tx_receiver;
+//     assert!(tx_receiver.next().await.is_some());
+//     assert!(tx_receiver.next().await.is_some());
+//     assert!(tx_receiver.next().await.is_none());
+// }
+//
+// /// This test checks that the error [HandleReceivedTxnsError::Internal]
+// /// is returned when the broadcast channel is closed.
+// ///
+// /// To trigger this error we simply close the broadcast channel receiver
+// /// before attempting to send any transactions through the broadcast channel
+// /// sender.
+// #[async_std::test]
+// async fn test_handle_received_txns_error_internal() {
+//     let tx_sender = {
+//         let (tx_sender, _) = async_broadcast::broadcast(10);
+//         tx_sender
+//     };
+//
+//     let num_transactions = 10;
+//     let mut txns = Vec::with_capacity(num_transactions);
+//     for index in 0..num_transactions {
+//         txns.push(TestTransaction::new(vec![index as u8]));
+//     }
+//     txns.push(TestTransaction::new(vec![0; 256]));
+//     let txns = txns;
+//
+//     {
+//         let mut handle_received_txns_iter = HandleReceivedTxns::<TestTypes>::new(
+//             tx_sender,
+//             txns.clone(),
+//             TransactionSource::HotShot,
+//             TEST_MAX_TX_LEN,
+//         );
+//
+//         match handle_received_txns_iter.next() {
+//             Some(Err(HandleReceivedTxnsError::Internal(err))) => {
+//                 // This is expected,
+//
+//                 match err {
+//                     async_broadcast::TrySendError::Closed(_) => {
+//                         // This is expected.
+//                     }
+//                     _ => {
+//                         panic!("Unexpected error: {:?}", err);
+//                     }
+//                 }
+//             }
+//             Some(Err(err)) => {
+//                 panic!("Unexpected error: {:?}", err);
+//             }
+//             Some(Ok(_)) => {
+//                 panic!("Expected an error, but got a result");
+//             }
+//             None => {
+//                 panic!("Expected an error, but got a result");
+//             }
+//         }
+//     }
+// }
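+//
+// // NOTE: `HandleReceivedTxns` is a lazy iterator yielding one `Result` per
+// // submitted transaction, so a caller can drain it and collect per-transaction
+// // outcomes (a sketch, using the same test types as above):
+// //
+// //     let results: Vec<_> = HandleReceivedTxns::<TestTypes>::new(
+// //         tx_sender,
+// //         txns.clone(),
+// //         TransactionSource::HotShot,
+// //         TEST_MAX_TX_LEN,
+// //     )
+// //     .collect();
+//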
+// /// This test checks that [HandleReceivedTxns] processes completely without
+// /// issue when the conditions are correct for it to do so.
+// #[async_std::test]
+// async fn test_handle_received_txns_success() {
+//     let (tx_sender, tx_receiver) = async_broadcast::broadcast(10);
+//     let num_transactions = 10;
+//     let mut txns = Vec::with_capacity(num_transactions);
+//     for index in 0..num_transactions {
+//         txns.push(TestTransaction::new(vec![index as u8]));
+//     }
+//     let txns = txns;
+//
+//     let handle_received_txns_iter = HandleReceivedTxns::<TestTypes>::new(
+//         tx_sender,
+//         txns.clone(),
+//         TransactionSource::HotShot,
+//         TEST_MAX_TX_LEN,
+//     );
+//
+//     for iteration in handle_received_txns_iter {
+//         match iteration {
+//             Ok(_) => {
+//                 // This is expected.
+//             }
+//             Err(err) => {
+//                 panic!("Unexpected error: {:?}", err);
+//             }
+//         }
+//     }
+//
+//     let mut tx_receiver = tx_receiver;
+//     for tx in txns {
+//         match tx_receiver.next().await {
+//             Some(received_txn) => {
+//                 assert_eq!(received_txn.tx, tx);
+//             }
+//             _ => {
+//                 panic!("Expected a TransactionMessage, but got something else");
+//             }
+//         }
+//     }
+// }
+// }
+//
diff --git a/crates/legacy/src/testing/basic_test.rs b/crates/legacy/src/testing/basic_test.rs
index 0f21739e..52bf4082 100644
--- a/crates/legacy/src/testing/basic_test.rs
+++ b/crates/legacy/src/testing/basic_test.rs
@@ -41,6 +41,7 @@ mod tests {
         TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_NUM_NODES_IN_VID_COMPUTATION,
         TEST_PROTOCOL_MAX_BLOCK_SIZE,
     };
+    use marketplace_builder_shared::utils::LegacyCommit;
 
     use crate::builder_state::{
         DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource,
@@ -48,7 +49,6 @@ mod tests {
     use crate::service::{
         handle_received_txns, GlobalState, ProxyGlobalState, ReceivedTransaction,
     };
-    use crate::LegacyCommit;
    use async_lock::RwLock;
    use committable::{Commitment, CommitmentBoundsArkless, Committable};
    use sha2::{Digest, Sha256};
diff --git a/crates/legacy/src/testing/mod.rs b/crates/legacy/src/testing/mod.rs
index 1fc5da8c..8fa0d802 100644
--- a/crates/legacy/src/testing/mod.rs
+++ b/crates/legacy/src/testing/mod.rs
@@ -1,12 +1,6 @@
 use std::{collections::VecDeque, marker::PhantomData};
 
-use crate::{
-    builder_state::{
-        BuilderState, DAProposalInfo, DaProposalMessage, MessageType, QuorumProposalMessage,
-    },
-    service::ReceivedTransaction,
-    LegacyCommit,
-};
+use crate::builder_state::{DAProposalInfo, DaProposalMessage, MessageType, QuorumProposalMessage};
 use async_broadcast::broadcast;
 use async_broadcast::Sender as BroadcastSender;
 use hotshot::{
@@ -27,6 +21,7 @@ use hotshot_example_types::{
     node_types::{TestTypes, TestVersions},
     state_types::{TestInstanceState, TestValidatedState},
 };
+use marketplace_builder_shared::block::ReceivedTransaction;
 use sha2::{Digest, Sha256};
 
 use crate::service::GlobalState;
@@ -35,221 +30,224 @@ use committable::{Commitment, CommitmentBoundsArkless, Committable};
 use marketplace_builder_shared::{
     block::{BuilderStateId, ParentBlockReferences},
     testing::constants::{TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_PROTOCOL_MAX_BLOCK_SIZE},
+    utils::LegacyCommit,
 };
 use std::sync::Arc;
 use std::time::Duration;
 
-mod basic_test;
-mod finalization_test;
-
-pub async fn create_builder_state(
-    channel_capacity: usize,
-    num_storage_nodes: usize,
-) -> (
-    BroadcastSender<MessageType<TestTypes>>,
-    Arc<RwLock<GlobalState<TestTypes>>>,
-    BuilderState<TestTypes>,
-) {
-    // set up the broadcast channels
-    let (bootstrap_sender, bootstrap_receiver) =
-        broadcast::<MessageType<TestTypes>>(channel_capacity);
-    let (_decide_sender, decide_receiver) = broadcast::<MessageType<TestTypes>>(channel_capacity);
-    let (_da_sender, da_receiver) = broadcast::<MessageType<TestTypes>>(channel_capacity);
-    let (_quorum_sender, quorum_proposal_receiver) =
-        broadcast::<MessageType<TestTypes>>(channel_capacity);
-    let (senders, _receivers) = broadcast::<MessageType<TestTypes>>(channel_capacity);
-    let (tx_sender, tx_receiver) =
-        broadcast::<Arc<ReceivedTransaction<TestTypes>>>(channel_capacity);
-
-    let genesis_vid_commitment = vid_commitment(&[], num_storage_nodes);
-    let genesis_builder_commitment = BuilderCommitment::from_bytes([]);
-
-    // instantiate the global state
-    let global_state = Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
-        bootstrap_sender,
-        tx_sender.clone(),
-        genesis_vid_commitment,
-        ViewNumber::genesis(),
-        ViewNumber::genesis(),
-        TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
-        TEST_PROTOCOL_MAX_BLOCK_SIZE,
-        num_storage_nodes,
-    )));
-
-    // instantiate the bootstrap builder state
-    let builder_state = BuilderState::new(
-        ParentBlockReferences {
-            view_number: ViewNumber::new(0),
-            vid_commitment: genesis_vid_commitment,
-            leaf_commit: Commitment::<Leaf<TestTypes>>::default_commitment_no_preimage(),
-            builder_commitment: genesis_builder_commitment,
-        },
-        decide_receiver.clone(),
-        da_receiver.clone(),
-        quorum_proposal_receiver.clone(),
-        bootstrap_receiver,
-        tx_receiver,
-        VecDeque::new(),
-        global_state.clone(),
-        Duration::from_millis(100),
-        1,
-        Arc::new(TestInstanceState::default()),
-        Duration::from_millis(100),
-        Arc::new(TestValidatedState::default()),
-    );
-
-    (senders, global_state, builder_state)
-}
-
-/// get transactions submitted in previous rounds, [] for genesis
-/// and simulate the block built from those
-pub async fn calc_proposal_msg(
-    num_storage_nodes: usize,
-    round: usize,
-    prev_quorum_proposal: Option<QuorumProposal<TestTypes>>,
-    transactions: Vec<TestTransaction>,
-) -> (
-    QuorumProposal<TestTypes>,
-    QuorumProposalMessage<TestTypes>,
-    DaProposalMessage<TestTypes>,
-    BuilderStateId<TestTypes>,
-) {
-    // get transactions submitted in previous rounds, [] for genesis
-    // and simulate the block built from those
-    let num_transactions = transactions.len() as u64;
-    let encoded_transactions = TestTransaction::encode(&transactions);
-    let block_payload = TestBlockPayload { transactions };
-    let block_vid_commitment = vid_commitment(&encoded_transactions, num_storage_nodes);
-    let metadata = TestMetadata { num_transactions };
-    let block_builder_commitment =
-        <TestBlockPayload as BlockPayload<TestTypes>>::builder_commitment(
-            &block_payload,
-            &metadata,
-        );
-
-    // generate key for leader of this round
-    let seed = [round as u8; 32];
-    let (pub_key, private_key) = BLSPubKey::generated_from_seed_indexed(seed, round as u64);
-
-    // Prepare the DA proposal message
-    let da_proposal_message: DaProposalMessage<TestTypes> = {
-        let da_proposal = DaProposal {
-            encoded_transactions: encoded_transactions.clone().into(),
-            metadata: TestMetadata {
-                num_transactions: encoded_transactions.len() as u64,
-            },
-            view_number: ViewNumber::new(round as u64),
-        };
-        let encoded_transactions_hash = Sha256::digest(&encoded_transactions);
-        let da_signature =
-            <TestTypes as NodeType>::SignatureKey::sign(
-                &private_key,
-                &encoded_transactions_hash,
-            )
-            .expect("Failed to sign encoded tx hash while preparing da proposal");
-
-        DaProposalMessage::<TestTypes> {
-            proposal: Arc::new(Proposal {
-                data: da_proposal,
-                signature: da_signature.clone(),
-                _pd: PhantomData,
-            }),
-            sender: pub_key,
-        }
-    };
-
-    let block_header = TestBlockHeader {
-        block_number: round as u64,
-        payload_commitment: block_vid_commitment,
-        builder_commitment: block_builder_commitment,
-        timestamp: round as u64,
-        metadata,
-        random: 1, // arbitrary
-    };
-
-    let justify_qc = match prev_quorum_proposal.as_ref() {
-        None => {
-            QuorumCertificate::<TestTypes>::genesis::<TestVersions>(
-                &TestValidatedState::default(),
-                &TestInstanceState::default(),
-            )
-            .await
-        }
-        Some(prev_proposal) => {
-            let prev_justify_qc = &prev_proposal.justify_qc;
-            let quorum_data = QuorumData::<TestTypes> {
-                leaf_commit: Leaf::from_quorum_proposal(prev_proposal).legacy_commit(),
-            };
-
-            // form a justify qc
-            SimpleCertificate::<TestTypes, QuorumData<TestTypes>, SuccessThreshold>::new(
-                quorum_data.clone(),
-                quorum_data.commit(),
-                prev_proposal.view_number,
-                prev_justify_qc.signatures.clone(),
-                PhantomData,
-            )
-        }
-    };
-
-    tracing::debug!("Iteration: {} justify_qc: {:?}", round, justify_qc);
-
-    let quorum_proposal = QuorumProposal::<TestTypes> {
-        block_header,
-        view_number: ViewNumber::new(round as u64),
-        justify_qc: justify_qc.clone(),
-        upgrade_certificate: None,
-        proposal_certificate: None,
-    };
-
-    let quorum_signature =
-        <TestTypes as NodeType>::SignatureKey::sign(
-            &private_key,
-            block_vid_commitment.as_ref(),
-        )
-        .expect("Failed to sign payload commitment while preparing Quorum proposal");
-
-    let quorum_proposal_msg = QuorumProposalMessage::<TestTypes> {
-        proposal: Arc::new(Proposal {
-            data: quorum_proposal.clone(),
-            signature: quorum_signature,
-            _pd: PhantomData,
-        }),
-        sender: pub_key,
-    };
-
-    let builder_state_id = BuilderStateId {
-        parent_commitment: block_vid_commitment,
-        parent_view: ViewNumber::new(round as u64),
-    };
-    (
-        quorum_proposal,
-        quorum_proposal_msg,
-        da_proposal_message,
-        builder_state_id,
-    )
-}
-
-pub async fn calc_builder_commitment(
-    da_proposal_message: DaProposalMessage<TestTypes>,
-) -> (BuilderCommitment, DAProposalInfo<TestTypes>) {
-    // If the respective builder state exists to handle the request
-    let proposal = da_proposal_message.proposal.clone();
-    // get the view number and encoded txns from the da_proposal_data
-    let view_number = proposal.data.view_number;
-    let encoded_txns = &proposal.data.encoded_transactions;
-
-    let metadata = &proposal.data.metadata;
-    // form a block payload from the encoded transactions
-    let block_payload =
-        <TestBlockPayload as BlockPayload<TestTypes>>::from_bytes(encoded_txns, metadata);
-    // get the builder commitment from the block payload
-    let payload_builder_commitment =
-        <TestBlockPayload as BlockPayload<TestTypes>>::builder_commitment(&block_payload, metadata);
-    // form the DA proposal info
-    let da_proposal_info = DAProposalInfo {
-        view_number,
-        proposal,
-    };
-    (payload_builder_commitment, da_proposal_info)
-}
+// TODO:
+//mod basic_test;
+//mod finalization_test;
+
+// pub async fn create_builder_state(
+//     channel_capacity: usize,
+//     num_storage_nodes: usize,
+// ) -> (
+//     BroadcastSender<MessageType<TestTypes>>,
+//     Arc<RwLock<GlobalState<TestTypes>>>,
+//     BuilderState<TestTypes>,
+// ) {
+//     // set up the broadcast channels
+//     let (bootstrap_sender, bootstrap_receiver) =
+//         broadcast::<MessageType<TestTypes>>(channel_capacity);
+//     let (_decide_sender, decide_receiver) = broadcast::<MessageType<TestTypes>>(channel_capacity);
+//     let (_da_sender, da_receiver) = broadcast::<MessageType<TestTypes>>(channel_capacity);
+//     let (_quorum_sender, quorum_proposal_receiver) =
+//         broadcast::<MessageType<TestTypes>>(channel_capacity);
+//     let (senders, _receivers) = broadcast::<MessageType<TestTypes>>(channel_capacity);
+//     let (tx_sender, tx_receiver) =
+//         broadcast::<Arc<ReceivedTransaction<TestTypes>>>(channel_capacity);
+//
+//     let genesis_vid_commitment = vid_commitment(&[], num_storage_nodes);
+//     let genesis_builder_commitment = BuilderCommitment::from_bytes([]);
+//
+//     // instantiate the global state
+//     let global_state = Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+//         bootstrap_sender,
+//         tx_sender.clone(),
+//         genesis_vid_commitment,
+//         ViewNumber::genesis(),
+//         ViewNumber::genesis(),
+//         TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+//         TEST_PROTOCOL_MAX_BLOCK_SIZE,
+//         num_storage_nodes,
+//     )));
+//
+//     // instantiate the bootstrap builder state
+//     let builder_state = BuilderState::new(
+//         ParentBlockReferences {
+//             view_number: ViewNumber::new(0),
+//             vid_commitment: genesis_vid_commitment,
+//             leaf_commit: Commitment::<Leaf<TestTypes>>::default_commitment_no_preimage(),
+//             builder_commitment: genesis_builder_commitment,
+//         },
+//         decide_receiver.clone(),
+//         da_receiver.clone(),
+//         quorum_proposal_receiver.clone(),
+//         bootstrap_receiver,
+//         tx_receiver,
+//         VecDeque::new(),
+//         global_state.clone(),
+//         Duration::from_millis(100),
+//         1,
+//         Arc::new(TestInstanceState::default()),
+//         Duration::from_millis(100),
+//         Arc::new(TestValidatedState::default()),
+//     );
+//
+//     (senders, global_state, builder_state)
+// }
+//
+// /// get transactions submitted in previous rounds, [] for genesis
+// /// and simulate the block built from those
+// pub async fn calc_proposal_msg(
+//     num_storage_nodes: usize,
+//     round: usize,
+//     prev_quorum_proposal: Option<QuorumProposal<TestTypes>>,
+//     transactions: Vec<TestTransaction>,
+// ) -> (
+//     QuorumProposal<TestTypes>,
+//     QuorumProposalMessage<TestTypes>,
+//     DaProposalMessage<TestTypes>,
+//     BuilderStateId<TestTypes>,
+// ) {
+//     // get transactions submitted in previous rounds, [] for genesis
+//     // and simulate the block built from those
+//     let num_transactions = transactions.len() as u64;
+//     let encoded_transactions = TestTransaction::encode(&transactions);
+//     let block_payload = TestBlockPayload { transactions };
+//     let block_vid_commitment = vid_commitment(&encoded_transactions, num_storage_nodes);
+//     let metadata = TestMetadata { num_transactions };
+//     let block_builder_commitment =
+//         <TestBlockPayload as BlockPayload<TestTypes>>::builder_commitment(
+//             &block_payload,
+//             &metadata,
+//         );
+//
+//     // generate key for leader of this round
+//     let seed = [round as u8; 32];
+//     let (pub_key, private_key) = BLSPubKey::generated_from_seed_indexed(seed, round as u64);
+//
+//     // Prepare the DA proposal message
+//     let da_proposal_message: DaProposalMessage<TestTypes> = {
+//         let da_proposal = DaProposal {
+//             encoded_transactions: encoded_transactions.clone().into(),
+//             metadata: TestMetadata {
+//                 num_transactions: encoded_transactions.len() as u64,
+//             },
+//             view_number: ViewNumber::new(round as u64),
+//         };
+//         let encoded_transactions_hash = Sha256::digest(&encoded_transactions);
+//         let da_signature =
+//             <TestTypes as NodeType>::SignatureKey::sign(
+//                 &private_key,
+//                 &encoded_transactions_hash,
+//             )
+//             .expect("Failed to sign encoded tx hash while preparing da proposal");
+//
+//         DaProposalMessage::<TestTypes> {
+//             proposal: Arc::new(Proposal {
+//                 data: da_proposal,
+//                 signature: da_signature.clone(),
+//                 _pd: PhantomData,
+//             }),
+//             sender: pub_key,
+//         }
+//     };
+//
+//     let block_header = TestBlockHeader {
+//         block_number: round as u64,
+//         payload_commitment: block_vid_commitment,
+//         builder_commitment: block_builder_commitment,
+//         timestamp: round as u64,
+//         metadata,
+//         random: 1, // arbitrary
+//     };
+//
+//     let justify_qc = match prev_quorum_proposal.as_ref() {
+//         None => {
+//             QuorumCertificate::<TestTypes>::genesis::<TestVersions>(
+//                 &TestValidatedState::default(),
+//                 &TestInstanceState::default(),
+//             )
+//             .await
+//         }
+//         Some(prev_proposal) => {
+//             let prev_justify_qc = &prev_proposal.justify_qc;
+//             let quorum_data = QuorumData::<TestTypes> {
+//                 leaf_commit: Leaf::from_quorum_proposal(prev_proposal).legacy_commit(),
+//             };
+//
+//             // form a justify qc
+//             SimpleCertificate::<TestTypes, QuorumData<TestTypes>, SuccessThreshold>::new(
+//                 quorum_data.clone(),
+//                 quorum_data.commit(),
+//                 prev_proposal.view_number,
+//                 prev_justify_qc.signatures.clone(),
+//                 PhantomData,
+//             )
+//         }
+//     };
+//
+//     tracing::debug!("Iteration: {} justify_qc: {:?}", round, justify_qc);
+//
+//     let quorum_proposal = QuorumProposal::<TestTypes> {
+//         block_header,
+//         view_number: ViewNumber::new(round as u64),
+//         justify_qc: justify_qc.clone(),
+//         upgrade_certificate: None,
+//         proposal_certificate: None,
+//     };
+//
+//     let quorum_signature =
+//         <TestTypes as NodeType>::SignatureKey::sign(
+//             &private_key,
+//             block_vid_commitment.as_ref(),
+//         )
+//         .expect("Failed to sign payload commitment while preparing Quorum proposal");
+//
+//     let quorum_proposal_msg = QuorumProposalMessage::<TestTypes> {
+//         proposal: Arc::new(Proposal {
+//             data: quorum_proposal.clone(),
+//             signature: quorum_signature,
+//             _pd: PhantomData,
+//         }),
+//         sender: pub_key,
+//     };
+//
+//     let builder_state_id = BuilderStateId {
+//         parent_commitment: block_vid_commitment,
+//         parent_view: ViewNumber::new(round as u64),
+//     };
+//     (
+//         quorum_proposal,
+//         quorum_proposal_msg,
+//         da_proposal_message,
+//         builder_state_id,
+//     )
+// }
+//
+// pub async fn calc_builder_commitment(
+//     da_proposal_message: DaProposalMessage<TestTypes>,
+// ) -> (BuilderCommitment, DAProposalInfo<TestTypes>) {
+//     // If the respective builder state exists to handle the request
+//     let proposal = da_proposal_message.proposal.clone();
+//     // get the view number and encoded txns from the da_proposal_data
+//     let view_number = proposal.data.view_number;
+//     let encoded_txns = &proposal.data.encoded_transactions;
+//
+//     let metadata = &proposal.data.metadata;
+//     // form a block payload from the encoded transactions
+//     let block_payload =
+//         <TestBlockPayload as BlockPayload<TestTypes>>::from_bytes(encoded_txns, metadata);
+//     // get the builder commitment from the block payload
+//     let payload_builder_commitment =
+//         <TestBlockPayload as BlockPayload<TestTypes>>::builder_commitment(&block_payload, metadata);
+//     // form the DA proposal info
+//     let da_proposal_info = DAProposalInfo {
+//         view_number,
+//         proposal,
+//     };
+//     (payload_builder_commitment, da_proposal_info)
+// }
+//
diff --git a/crates/marketplace/Cargo.toml b/crates/marketplace/Cargo.toml
index 629c54be..ee1ab85e 100644
--- a/crates/marketplace/Cargo.toml
+++ b/crates/marketplace/Cargo.toml
@@ -13,15 +13,15 @@ async-trait = { workspace = true }
 committable = { workspace = true }
 derive_more = { workspace = true, features = ["deref", "deref_mut"] }
 futures = { workspace = true }
-marketplace-builder-shared = { path = "../shared" }
-
 hotshot = { workspace = true }
 hotshot-builder-api = { workspace = true }
 hotshot-types = { workspace = true }
 lru = { workspace = true }
+marketplace-builder-shared = { path = "../shared" }
 sha2 = { workspace = true }
 tagged-base64 = { workspace = true }
 tide-disco = { workspace = true }
+tokio = { workspace = true }
 tracing = { workspace = true }
 vbs = { workspace = true }