diff --git a/.github/workflows/build_nix.yml b/.github/workflows/build_nix.yml index 7dff7d5c..5a5f6eba 100644 --- a/.github/workflows/build_nix.yml +++ b/.github/workflows/build_nix.yml @@ -11,6 +11,12 @@ on: jobs: nix: + strategy: + matrix: + flags: ["--cfg legacy_builder_refactored", ""] + env: + RUSTFLAGS: "${{ matrix.flags }}" + RUSTDOCFLAGS: "${{ matrix.flags }}" runs-on: ubuntu-latest timeout-minutes: 90 steps: diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 9e9786bd..d0a4d80f 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -34,9 +34,8 @@ jobs: run: | git config --global --add safe.directory "$PWD" nix develop .#perfShell -c \ - cargo llvm-cov --profile=release --all-features --all-targets \ - --lcov --output-path lcov.info --no-cfg-coverage \ - -- --test-threads 2 + cargo llvm-cov nextest -j 2 --release --all-features --all-targets \ + --branch --lcov --output-path lcov.info \ - uses: coverallsapp/github-action@master with: diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 92f471d6..1610e0b7 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,9 +14,14 @@ concurrency: jobs: lint: + strategy: + matrix: + flags: ["--cfg legacy_builder_refactored", ""] runs-on: ubuntu-latest env: + RUSTFLAGS: "${{ matrix.flags }}" + RUSTDOCFLAGS: "${{ matrix.flags }}" RUST_LOG: info steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f8015890..3dd437d6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -14,9 +14,14 @@ concurrency: jobs: test: - runs-on: ubuntu-latest + strategy: + matrix: + flags: ["--cfg legacy_builder_refactored", ""] env: + RUSTFLAGS: "${{ matrix.flags }}" + RUSTDOCFLAGS: "${{ matrix.flags }}" RUST_LOG: info + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -42,7 +47,6 @@ jobs: timeout-minutes: 60 test-ignored: - if: false runs-on: 
ubuntu-latest env: RUST_MIN_STACK: 64000000 diff --git a/Cargo.lock b/Cargo.lock index b2b6aed1..92551360 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,6 +95,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -130,6 +131,15 @@ dependencies = [ "libc", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + [[package]] name = "anstream" version = "0.6.18" @@ -891,6 +901,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +dependencies = [ + "bytemuck", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -1166,6 +1185,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "bytemuck" +version = "1.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" +dependencies = [ + "bytemuck_derive", +] + +[[package]] +name = "bytemuck_derive" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "byteorder" version = "1.5.0" @@ -1380,6 +1419,17 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +[[package]] +name = "coarsetime" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"13b3839cf01bb7960114be3ccf2340f541b6d0c81f8690b007b2b39f750f7e5d" +dependencies = [ + "libc", + "wasix", + "wasm-bindgen", +] + [[package]] name = "colorchoice" version = "1.0.3" @@ -2873,16 +2923,25 @@ dependencies = [ "async-broadcast", "async-lock 3.4.0", "async-trait", + "atomic", "bincode", + "bytemuck", + "coarsetime", "committable", "derive_more 1.0.0", "futures", "hotshot", "hotshot-builder-api", "hotshot-example-types", + "hotshot-macros", + "hotshot-task-impls", + "hotshot-testing", "hotshot-types", "lru 0.12.5", "marketplace-builder-shared", + "num_cpus", + "once_cell", + "portpicker", "serde", "sha2 0.10.8", "tagged-base64", @@ -2890,6 +2949,8 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber 0.3.18", + "tracing-test 0.1.0", + "url", "vbs", ] @@ -2916,7 +2977,7 @@ dependencies = [ "tokio", "toml", "tracing", - "tracing-test", + "tracing-test 0.2.5", "vbs", ] @@ -3344,7 +3405,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -4602,7 +4663,6 @@ dependencies = [ "hotshot-macros", "hotshot-testing", "hotshot-types", - "lru 0.12.5", "marketplace-builder-shared", "num_cpus", "sha2 0.10.8", @@ -4611,6 +4671,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber 0.3.18", + "tracing-test 0.1.0", "url", "vbs", ] @@ -4639,14 +4700,19 @@ dependencies = [ "hotshot-types", "jf-vid", "nonempty-collections", + "once_cell", "portpicker", + "quick_cache", "rand 0.8.5", "serde", + "sha2 0.10.8", "surf-disco", + "thiserror 2.0.3", "tide-disco", "tokio", "tracing", "tracing-subscriber 0.3.18", + "tracing-test 0.1.0", "url", "vbs", "vec1", @@ -4658,6 +4724,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "matchers" version = "0.1.0" @@ -5677,6 +5752,18 @@ dependencies = [ "unsigned-varint 0.8.0", ] +[[package]] +name = "quick_cache" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d7c94f8935a9df96bb6380e8592c70edf497a643f94bd23b2f76b399385dbf4" +dependencies = [ + "ahash 0.8.11", + "equivalent", + "hashbrown 0.14.5", + "parking_lot", +] + [[package]] name = "quinn" version = "0.11.5" @@ -7432,7 +7519,7 @@ dependencies = [ "tracing", "tracing-distributed", "tracing-futures", - "tracing-log", + "tracing-log 0.2.0", "tracing-subscriber 0.3.18", "url", "vbs", @@ -7802,6 +7889,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "tracing-log" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + [[package]] name = "tracing-log" version = "0.2.0" @@ -7829,7 +7927,20 @@ version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers 0.0.1", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", "tracing-core", + "tracing-log 0.1.4", + "tracing-serde", ] [[package]] @@ -7838,7 +7949,7 @@ version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ - "matchers", + "matchers 0.1.0", "nu-ansi-term", "once_cell", "regex", @@ -7849,10 +7960,22 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-log", + "tracing-log 0.2.0", "tracing-serde", ] +[[package]] +name = 
"tracing-test" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3b48778c2d401c6a7fcf38a0e3c55dc8e8e753cbd381044a8cdb6fd69a29f53" +dependencies = [ + "lazy_static", + "tracing-core", + "tracing-subscriber 0.2.25", + "tracing-test-macro 0.1.0", +] + [[package]] name = "tracing-test" version = "0.2.5" @@ -7861,7 +7984,18 @@ checksum = "557b891436fe0d5e0e363427fc7f217abf9ccd510d5136549847bdcbcd011d68" dependencies = [ "tracing-core", "tracing-subscriber 0.3.18", - "tracing-test-macro", + "tracing-test-macro 0.2.5", +] + +[[package]] +name = "tracing-test-macro" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c49adbab879d2e0dd7f75edace5f0ac2156939ecb7e6a1e8fa14e53728328c48" +dependencies = [ + "lazy_static", + "quote", + "syn 1.0.109", ] [[package]] @@ -8227,6 +8361,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" +[[package]] +name = "wasix" +version = "0.12.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1fbb4ef9bbca0c1170e0b00dd28abc9e3b68669821600cad1caaed606583c6d" +dependencies = [ + "wasi 0.11.0+wasi-snapshot-preview1", +] + [[package]] name = "wasm-bindgen" version = "0.2.95" @@ -8388,7 +8531,7 @@ version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ - "windows-core", + "windows-core 0.51.1", "windows-targets 0.48.5", ] @@ -8401,6 +8544,15 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-registry" version = "0.2.0" 
diff --git a/Cargo.toml b/Cargo.toml index 85569fd1..892e61bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,10 +30,13 @@ either = "1.13" futures = "0.3" jf-vid = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } hex = "0.4.3" -lru = "0.12.5" +lru = "0.12" multimap = "0.10.0" nonempty-collections = "0.2" +once_cell = "1.20" num_cpus = "1.16" +portpicker = "0.1.1" +quick_cache = "0.6" rand = "0.8" serde = "1.0" serde_json = "1.0" @@ -42,9 +45,11 @@ snafu = "0.8" surf-disco = "0.9" tagged-base64 = "0.4" tide-disco = "0.9" +thiserror = "2.0" tokio = "1" toml = "0.8" tracing = "0.1" +tracing-test = "0.1" typenum = "1.17" url = "2.5" vbs = "0.1" @@ -52,7 +57,10 @@ vec1 = "1.12" tracing-subscriber = "0.3" [workspace.lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(coverage_nightly)'] } +unexpected_cfgs = { level = "warn", check-cfg = [ + 'cfg(legacy_builder_refactored, values(none()))', + 'cfg(coverage_nightly)', +] } [workspace.lints.clippy] disallowed-names = "deny" diff --git a/crates/legacy/Cargo.toml b/crates/legacy/Cargo.toml index e96b790e..6a2228c5 100644 --- a/crates/legacy/Cargo.toml +++ b/crates/legacy/Cargo.toml @@ -10,14 +10,18 @@ anyhow = { workspace = true } async-broadcast = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } +atomic = "0.6" bincode = { workspace = true } +bytemuck = { version = "1.19", features = ["derive"] } +coarsetime = "0.1.34" committable = { workspace = true } -derive_more = { workspace = true, features = ["deref", "deref_mut"] } +derive_more = { workspace = true, features = ["deref", "deref_mut", "debug"] } futures = { workspace = true } hotshot = { workspace = true } hotshot-builder-api = { workspace = true } hotshot-types = { workspace = true } lru = { workspace = true } +once_cell = { workspace = true } serde = { workspace = true, features = ["derive"] } sha2 = { workspace = true } tagged-base64 = { workspace = true } @@ -29,6 
+33,13 @@ vbs = { workspace = true } [dev-dependencies] hotshot-example-types = { workspace = true } +hotshot-macros = { workspace = true } +hotshot-task-impls = { workspace = true } +hotshot-testing = { workspace = true } +num_cpus = { workspace = true } +portpicker = { workspace = true } +tracing-test = { workspace = true } +url = { workspace = true } [lints] workspace = true diff --git a/crates/legacy/src/lib.rs b/crates/legacy/src/lib.rs index 820e2153..42842991 100644 --- a/crates/legacy/src/lib.rs +++ b/crates/legacy/src/lib.rs @@ -1,75 +1,5 @@ -// Copyright (c) 2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot Builder Protocol. -// +#[cfg_attr(legacy_builder_refactored, path = "refactored/lib.rs")] +#[cfg_attr(not(legacy_builder_refactored), path = "old/lib.rs")] +mod implementation; -// Builder Phase 1 -// It mainly provides three API services to hotshot proposers: -// 1. Serves a proposer(leader)'s request to provide blocks information -// 2. Serves a proposer(leader)'s request to provide the full blocks information -// 3. Serves a proposer(leader)'s request to provide the block header information - -// It also provides one API services external users: -// 1. Serves a user's request to submit a private transaction - -// providing the core services to support above API services -pub mod builder_state; - -// Core interaction with the HotShot network -pub mod service; - -// tracking the testing -#[cfg(test)] -pub mod testing; - -use hotshot_builder_api::v0_1::builder::BuildError; -use tokio::sync::mpsc::UnboundedReceiver; - -/// `WaitAndKeep` is a helper enum that allows for the lazy polling of a single -/// value from an unbound receiver. 
-#[derive(Debug)] -pub enum WaitAndKeep { - Keep(T), - Wait(UnboundedReceiver), -} - -#[derive(Debug)] -pub(crate) enum WaitAndKeepGetError { - FailedToResolvedVidCommitmentFromChannel, -} - -impl From for BuildError { - fn from(e: WaitAndKeepGetError) -> Self { - match e { - WaitAndKeepGetError::FailedToResolvedVidCommitmentFromChannel => { - BuildError::Error("failed to resolve VidCommitment from channel".to_string()) - } - } - } -} - -impl WaitAndKeep { - /// get will return a clone of the value that is already stored within the - /// value of `WaitAndKeep::Keep` if the value is already resolved. Otherwise - /// it will poll the next value from the channel and replace the locally - /// stored `WaitAndKeep::Wait` with the resolved value as a `WaitAndKeep::Keep`. - /// - /// Note: This pattern seems very similar to a Future, and ultimately - /// returns a future. It's not clear why this needs to be implemented - /// in such a way and not just implemented as a boxed future. - pub(crate) async fn get(&mut self) -> Result { - match self { - WaitAndKeep::Keep(t) => Ok(t.clone()), - WaitAndKeep::Wait(fut) => { - let got = fut - .recv() - .await - .ok_or(WaitAndKeepGetError::FailedToResolvedVidCommitmentFromChannel); - if let Ok(got) = &got { - let mut replace = WaitAndKeep::Keep(got.clone()); - core::mem::swap(self, &mut replace); - } - got - } - } - } -} +pub use implementation::*; diff --git a/crates/legacy/src/builder_state.rs b/crates/legacy/src/old/builder_state.rs similarity index 99% rename from crates/legacy/src/builder_state.rs rename to crates/legacy/src/old/builder_state.rs index 9c0c0415..dabcc913 100644 --- a/crates/legacy/src/builder_state.rs +++ b/crates/legacy/src/old/builder_state.rs @@ -628,6 +628,9 @@ impl BuilderState { vid_commitment: quorum_proposal.data.block_header.payload_commitment(), leaf_commit: leaf.commit(), builder_commitment: quorum_proposal.data.block_header.builder_commitment(), + // Unused in old legacy builder: + last_nonempty_view: 
None, + tx_count: 0, }; let builder_state_id = BuilderStateId { @@ -1436,6 +1439,9 @@ mod test { vid_commitment: quorum_proposal.block_header.payload_commitment, leaf_commit, builder_commitment: quorum_proposal.block_header.builder_commitment, + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, req_sender, ); diff --git a/crates/legacy/src/old/lib.rs b/crates/legacy/src/old/lib.rs new file mode 100644 index 00000000..820e2153 --- /dev/null +++ b/crates/legacy/src/old/lib.rs @@ -0,0 +1,75 @@ +// Copyright (c) 2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot Builder Protocol. +// + +// Builder Phase 1 +// It mainly provides three API services to hotshot proposers: +// 1. Serves a proposer(leader)'s request to provide blocks information +// 2. Serves a proposer(leader)'s request to provide the full blocks information +// 3. Serves a proposer(leader)'s request to provide the block header information + +// It also provides one API services external users: +// 1. Serves a user's request to submit a private transaction + +// providing the core services to support above API services +pub mod builder_state; + +// Core interaction with the HotShot network +pub mod service; + +// tracking the testing +#[cfg(test)] +pub mod testing; + +use hotshot_builder_api::v0_1::builder::BuildError; +use tokio::sync::mpsc::UnboundedReceiver; + +/// `WaitAndKeep` is a helper enum that allows for the lazy polling of a single +/// value from an unbound receiver. 
+#[derive(Debug)] +pub enum WaitAndKeep { + Keep(T), + Wait(UnboundedReceiver), +} + +#[derive(Debug)] +pub(crate) enum WaitAndKeepGetError { + FailedToResolvedVidCommitmentFromChannel, +} + +impl From for BuildError { + fn from(e: WaitAndKeepGetError) -> Self { + match e { + WaitAndKeepGetError::FailedToResolvedVidCommitmentFromChannel => { + BuildError::Error("failed to resolve VidCommitment from channel".to_string()) + } + } + } +} + +impl WaitAndKeep { + /// get will return a clone of the value that is already stored within the + /// value of `WaitAndKeep::Keep` if the value is already resolved. Otherwise + /// it will poll the next value from the channel and replace the locally + /// stored `WaitAndKeep::Wait` with the resolved value as a `WaitAndKeep::Keep`. + /// + /// Note: This pattern seems very similar to a Future, and ultimately + /// returns a future. It's not clear why this needs to be implemented + /// in such a way and not just implemented as a boxed future. + pub(crate) async fn get(&mut self) -> Result { + match self { + WaitAndKeep::Keep(t) => Ok(t.clone()), + WaitAndKeep::Wait(fut) => { + let got = fut + .recv() + .await + .ok_or(WaitAndKeepGetError::FailedToResolvedVidCommitmentFromChannel); + if let Ok(got) = &got { + let mut replace = WaitAndKeep::Keep(got.clone()); + core::mem::swap(self, &mut replace); + } + got + } + } + } +} diff --git a/crates/legacy/src/service.rs b/crates/legacy/src/old/service.rs similarity index 99% rename from crates/legacy/src/service.rs rename to crates/legacy/src/old/service.rs index ba2bc695..c026ca36 100644 --- a/crates/legacy/src/service.rs +++ b/crates/legacy/src/old/service.rs @@ -1803,6 +1803,9 @@ mod test { vid_commitment: parent_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, req_sender.clone(), ); @@ -1836,6 +1839,9 @@ mod test { vid_commitment: parent_commit, 
leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, req_sender.clone(), ); @@ -1893,6 +1899,9 @@ mod test { vid_commitment: parent_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, req_sender.clone(), ); @@ -1927,6 +1936,9 @@ mod test { vid_commitment: parent_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, req_sender.clone(), ); @@ -2010,6 +2022,9 @@ mod test { vid_commitment: parent_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, req_sender.clone(), ); @@ -2043,6 +2058,9 @@ mod test { vid_commitment: parent_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, req_sender.clone(), ); @@ -2582,6 +2600,9 @@ mod test { vid_commitment: vid_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, async_broadcast::broadcast(10).0, ); @@ -2677,6 +2698,9 @@ mod test { vid_commitment: vid_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, async_broadcast::broadcast(10).0, ); @@ -2757,6 +2781,9 @@ mod test { vid_commitment: vid_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused 
in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, async_broadcast::broadcast(10).0, ); @@ -2798,6 +2825,9 @@ mod test { vid_commitment: vid_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, async_broadcast::broadcast(10).0, ); @@ -2857,6 +2887,9 @@ mod test { vid_commitment: vid_commit, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, async_broadcast::broadcast(10).0, ); @@ -3254,6 +3287,9 @@ mod test { vid_commitment: expected_builder_state_id.parent_commitment, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, response_sender, ); @@ -3377,6 +3413,9 @@ mod test { vid_commitment: expected_builder_state_id.parent_commitment, leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, response_sender, ); diff --git a/crates/legacy/src/testing/basic_test.rs b/crates/legacy/src/old/testing/basic_test.rs similarity index 99% rename from crates/legacy/src/testing/basic_test.rs rename to crates/legacy/src/old/testing/basic_test.rs index 7a61e7a0..926c9da9 100644 --- a/crates/legacy/src/testing/basic_test.rs +++ b/crates/legacy/src/old/testing/basic_test.rs @@ -51,7 +51,6 @@ mod tests { use crate::service::{ handle_received_txns, GlobalState, ProxyGlobalState, ReceivedTransaction, }; - use async_lock::RwLock; use committable::{Commitment, CommitmentBoundsArkless, Committable}; use sha2::{Digest, Sha256}; @@ -137,6 +136,9 @@ mod tests { vid_commitment: initial_commitment, leaf_commit: Commitment::>::default_commitment_no_preimage(), 
builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, decide_receiver.clone(), da_receiver.clone(), diff --git a/crates/legacy/src/testing/finalization_test.rs b/crates/legacy/src/old/testing/finalization_test.rs similarity index 99% rename from crates/legacy/src/testing/finalization_test.rs rename to crates/legacy/src/old/testing/finalization_test.rs index fedacddf..3ec611dd 100644 --- a/crates/legacy/src/testing/finalization_test.rs +++ b/crates/legacy/src/old/testing/finalization_test.rs @@ -94,6 +94,9 @@ pub fn setup_builder_for_test() -> TestSetup { view_number: ViewNumber::genesis(), leaf_commit: Commitment::from_raw([0; 32]), builder_commitment: BuilderCommitment::from_bytes([0; 32]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, decide_receiver, da_proposal_receiver, diff --git a/crates/legacy/src/testing/mod.rs b/crates/legacy/src/old/testing/mod.rs similarity index 98% rename from crates/legacy/src/testing/mod.rs rename to crates/legacy/src/old/testing/mod.rs index a0ae1284..00787a41 100644 --- a/crates/legacy/src/testing/mod.rs +++ b/crates/legacy/src/old/testing/mod.rs @@ -86,6 +86,9 @@ pub async fn create_builder_state( vid_commitment: genesis_vid_commitment, leaf_commit: Commitment::>::default_commitment_no_preimage(), builder_commitment: genesis_builder_commitment, + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, }, decide_receiver.clone(), da_receiver.clone(), diff --git a/crates/legacy/src/refactored/block_size_limits.rs b/crates/legacy/src/refactored/block_size_limits.rs new file mode 100644 index 00000000..a88616b2 --- /dev/null +++ b/crates/legacy/src/refactored/block_size_limits.rs @@ -0,0 +1,172 @@ +use atomic::Atomic; +use coarsetime::{Duration, Instant}; +use std::sync::atomic::Ordering; + +#[derive(Debug, Clone, Copy, bytemuck::NoUninit)] +#[repr(C)] +pub(crate) struct MutableState { + /// Current 
block size limits + pub max_block_size: u64, + /// Last time we've incremented the max block size, obtained + /// as [`coarsetime::Instant::as_ticks()`] + pub last_block_size_increment: u64, +} + +/// Adjustable limits for block size ceiled by maximum block size allowed by the protocol. +/// We will avoid build blocks over this size limit for performance reasons: computing VID +/// for bigger blocks could be too costly and lead to API timeouts. +/// +/// Will be decremented if we fail to respond to `claim_block_header_input` request in time, +/// and periodically incremented in two cases +/// - we've served a response to `claim_block_header_input` in time and the block we've served +/// was truncated because of our current max block size policy. +/// - we've served a response to `claim_block_header_input` in time and [`Self::increment_period`] +/// has passed since last time we've incremented the block limits +#[derive(Debug)] +pub struct BlockSizeLimits { + pub(crate) mutable_state: Atomic, + /// Maximum block size as defined by protocol. 
We'll never increment beyond that + pub protocol_max_block_size: u64, + /// Period between optimistic increments of the block size + pub increment_period: Duration, +} + +impl BlockSizeLimits { + /// Never go lower than 10 kilobytes + pub const MAX_BLOCK_SIZE_FLOOR: u64 = 10_000; + /// When adjusting max block size, it will be decremented or incremented + /// by current value / `MAX_BLOCK_SIZE_CHANGE_DIVISOR` + pub const MAX_BLOCK_SIZE_CHANGE_DIVISOR: u64 = 10; + + pub fn new(protocol_max_block_size: u64, increment_period: std::time::Duration) -> Self { + Self { + protocol_max_block_size, + increment_period: increment_period.into(), + mutable_state: Atomic::new(MutableState { + max_block_size: protocol_max_block_size, + last_block_size_increment: Instant::now().as_ticks(), + }), + } + } + + pub fn max_block_size(&self) -> u64 { + self.mutable_state + .load(std::sync::atomic::Ordering::Relaxed) + .max_block_size + } + + /// If increment period has elapsed or `force` flag is set, + /// increment [`Self::max_block_size`] by current value / [`Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR`] + /// with [`Self::protocol_max_block_size`] as a ceiling + pub fn try_increment_block_size(&self, force: bool) { + if force + || Instant::now().as_ticks().saturating_sub( + self.mutable_state + .load(Ordering::Relaxed) + .last_block_size_increment, + ) >= self.increment_period.as_ticks() + { + self.mutable_state + .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |previous| { + let max_block_size = std::cmp::min( + previous.max_block_size + + previous + .max_block_size + .div_ceil(Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR), + self.protocol_max_block_size, + ); + let last_block_size_increment = Instant::now().as_ticks(); + Some(MutableState { + max_block_size, + last_block_size_increment, + }) + }) + .expect("Closure always returns Some"); + } + } + + /// Decrement [`Self::max_block_size`] by current value / [`Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR`] + /// with [`Self::MAX_BLOCK_SIZE_FLOOR`] as a
floor + pub fn decrement_block_size(&self) { + self.mutable_state + .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |previous| { + let max_block_size = std::cmp::max( + previous.max_block_size.saturating_sub( + previous + .max_block_size + .div_ceil(Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR), + ), + Self::MAX_BLOCK_SIZE_FLOOR, + ); + Some(MutableState { + max_block_size, + last_block_size_increment: previous.last_block_size_increment, + }) + }) + .expect("Closure always returns Some"); + } +} + +#[cfg(test)] +mod tests { + use marketplace_builder_shared::testing::constants::{ + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_PROTOCOL_MAX_BLOCK_SIZE, + }; + use tracing_test::traced_test; + + use super::*; + + #[test] + #[traced_test] + fn test_increment_block_size() { + let mut block_size_limits = BlockSizeLimits::new( + TEST_PROTOCOL_MAX_BLOCK_SIZE, + std::time::Duration::from_millis(25), + ); + // Simulate decreased limits + block_size_limits.mutable_state = Atomic::new(MutableState { + max_block_size: TEST_PROTOCOL_MAX_BLOCK_SIZE / 2, + last_block_size_increment: Instant::now().as_ticks(), + }); + + // Shouldn't increment, increment period hasn't passed yet + block_size_limits.try_increment_block_size(false); + assert!(block_size_limits.max_block_size() == TEST_PROTOCOL_MAX_BLOCK_SIZE / 2); + + // Should increment, increment period hasn't passed yet, but force flag is set + block_size_limits.try_increment_block_size(true); + assert!(block_size_limits.max_block_size() > TEST_PROTOCOL_MAX_BLOCK_SIZE / 2); + let new_size = block_size_limits.max_block_size(); + + std::thread::sleep(std::time::Duration::from_millis(30)); + + // Should increment, increment period has passed + block_size_limits.try_increment_block_size(false); + assert!(block_size_limits.max_block_size() > new_size); + } + + #[test] + #[traced_test] + fn test_decrement_block_size() { + let block_size_limits = BlockSizeLimits::new( + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + ); + 
block_size_limits.decrement_block_size(); + assert!(block_size_limits.max_block_size() < TEST_PROTOCOL_MAX_BLOCK_SIZE); + } + + #[test] + #[traced_test] + fn test_max_block_size_floor() { + let block_size_limits = BlockSizeLimits::new( + BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR + 1, + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + ); + block_size_limits.decrement_block_size(); + assert_eq!( + block_size_limits.max_block_size(), + BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR + ); + } +} diff --git a/crates/legacy/src/refactored/block_store.rs b/crates/legacy/src/refactored/block_store.rs new file mode 100644 index 00000000..7b0d7639 --- /dev/null +++ b/crates/legacy/src/refactored/block_store.rs @@ -0,0 +1,87 @@ +use std::marker::PhantomData; + +use hotshot::traits::BlockPayload; +use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; +use hotshot_types::traits::signature_key::BuilderSignatureKey; +use hotshot_types::vid::{VidCommitment, VidPrecomputeData}; +use marketplace_builder_shared::error::Error; +use marketplace_builder_shared::utils::{BuilderKeys, WaitAndKeep}; +use marketplace_builder_shared::{ + block::BuilderStateId, coordinator::tiered_view_map::TieredViewMap, +}; + +use marketplace_builder_shared::block::BlockId; + +use hotshot_types::traits::node_implementation::NodeType; + +// It holds all the necessary information for a block +#[derive(Debug, Clone)] +pub struct BlockInfo { + pub block_payload: Types::BlockPayload, + pub metadata: <::BlockPayload as BlockPayload>::Metadata, + pub vid_data: WaitAndKeep<(VidCommitment, VidPrecomputeData)>, + pub block_size: u64, + pub offered_fee: u64, + // Could we have included more transactions with this block, but chose not to? 
+ pub truncated: bool, +} + +impl BlockInfo { + pub fn signed_response( + &self, + keys: &BuilderKeys, + ) -> Result, Error> { + let block_hash = self.block_payload.builder_commitment(&self.metadata); + let signature = ::BuilderSignatureKey::sign_block_info( + &keys.1, + self.block_size, + self.offered_fee, + &block_hash, + ) + .map_err(Error::Signing)?; + Ok(AvailableBlockInfo { + block_hash, + block_size: self.block_size, + offered_fee: self.offered_fee, + signature, + sender: keys.0.clone(), + _phantom: PhantomData, + }) + } +} + +#[derive(Default)] +pub struct BlockStore { + pub(crate) blocks: TieredViewMap, BlockInfo>, + pub(crate) block_cache: TieredViewMap, BlockId>, +} + +impl BlockStore { + pub fn new() -> Self { + Self::default() + } + + pub fn update( + &mut self, + built_by: BuilderStateId, + block_id: BlockId, + block_info: BlockInfo, + ) { + self.blocks.insert(block_id.clone(), block_info); + self.block_cache.insert(built_by, block_id); + } + + pub fn get_cached(&self, builder_id: &BuilderStateId) -> Option<&BlockInfo> { + let block_id = self.block_cache.get(builder_id)?; + self.blocks.get(block_id) + } + + pub fn get_block(&self, block_id: &BlockId) -> Option<&BlockInfo> { + self.blocks.get(block_id) + } + + pub fn prune(&mut self, cutoff: Types::View) { + self.blocks.prune(cutoff); + self.block_cache.prune(cutoff); + } +} diff --git a/crates/legacy/src/refactored/lib.rs b/crates/legacy/src/refactored/lib.rs new file mode 100644 index 00000000..67d926af --- /dev/null +++ b/crates/legacy/src/refactored/lib.rs @@ -0,0 +1,18 @@ +//! Builder Phase 1 +//! It mainly provides three API services to hotshot proposers: +//! 1. Serves a proposer(leader)'s request to provide blocks information +//! 2. Serves a proposer(leader)'s request to provide the full blocks information +//! 3. Serves a proposer(leader)'s request to provide the block header information +//! +//! It also provides one API service to external users: +//! 1. 
Serves a user's request to submit a private transaction +#![cfg_attr(coverage_nightly, feature(coverage_attribute))] + +pub mod block_size_limits; +pub mod block_store; +pub mod service; + +// tracking the testing +#[cfg(test)] +#[cfg_attr(coverage_nightly, coverage(off))] +pub mod testing; diff --git a/crates/legacy/src/refactored/service.rs b/crates/legacy/src/refactored/service.rs new file mode 100644 index 00000000..2fde5b0c --- /dev/null +++ b/crates/legacy/src/refactored/service.rs @@ -0,0 +1,816 @@ +use hotshot::types::Event; +use hotshot_builder_api::v0_1::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::{define_api, submit_api, BuildError, Error as BuilderApiError, TransactionStatus}, + data_source::{AcceptsTxnSubmits, BuilderDataSource}, +}; +use hotshot_types::traits::block_contents::{precompute_vid_commitment, Transaction}; +use hotshot_types::traits::EncodeBytes; +use hotshot_types::{ + event::EventType, + traits::{ + block_contents::BlockPayload, + node_implementation::{ConsensusTime, NodeType}, + signature_key::{BuilderSignatureKey, SignatureKey}, + }, + utils::BuilderCommitment, + vid::VidCommitment, +}; +use marketplace_builder_shared::coordinator::BuilderStateLookup; +use marketplace_builder_shared::error::Error; +use marketplace_builder_shared::state::BuilderState; +use marketplace_builder_shared::utils::{BuilderKeys, WaitAndKeep}; +use tide_disco::app::AppError; +use tokio::spawn; +use tokio::time::{sleep, timeout}; +use tracing::{error, info, instrument, trace, warn}; +use vbs::version::StaticVersion; + +use marketplace_builder_shared::{ + block::{BlockId, BuilderStateId, ReceivedTransaction, TransactionSource}, + coordinator::BuilderStateCoordinator, +}; + +use crate::block_size_limits::BlockSizeLimits; +use crate::block_store::{BlockInfo, BlockStore}; +pub use async_broadcast::{broadcast, RecvError, TryRecvError}; +use async_lock::RwLock; +use async_trait::async_trait; +use 
committable::Commitment; +use futures::{future::BoxFuture, Stream}; +use futures::{ + stream::{FuturesOrdered, FuturesUnordered, StreamExt}, + TryStreamExt, +}; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::time::Duration; +use std::{fmt::Display, time::Instant}; +use tagged_base64::TaggedBase64; +use tide_disco::{method::ReadState, App}; +use tokio::task::JoinHandle; + +/// Proportion of overall allotted time to wait for optimal builder state +/// to appear before resorting to highest view builder state +const BUILDER_STATE_EXACT_MATCH_DIVISOR: u32 = 2; + +/// This constant governs duration of sleep in various retry loops +/// in the API. If we're re-trying something with a timeout of `X`, +/// we will sleep for `X / RETRY_LOOP_RESOLUTION` between attempts. +const RETRY_LOOP_RESOLUTION: u32 = 10; + +/// We will not increment max block value if we aren't able to serve a response +/// with a margin below [`GlobalState::max_api_waiting_time`] +/// more than [`GlobalState::max_api_waiting_time`] / `VID_RESPONSE_TARGET_MARGIN_DIVISOR` +const VID_RESPONSE_TARGET_MARGIN_DIVISOR: u32 = 10; + +/// [`ALLOW_EMPTY_BLOCK_PERIOD`] is a constant that is used to determine the +/// number of future views that we will allow building empty blocks for. +/// +/// This value governs the ability for the Builder to prioritize finalizing +/// transactions by producing empty blocks rather than avoiding the creation +/// of them, following the proposal that contains transactions. +pub(crate) const ALLOW_EMPTY_BLOCK_PERIOD: u64 = 3; + +/// Configuration to initialize the builder +#[derive(Debug, Clone)] +pub struct BuilderConfig { + /// Keys that this builder will use to sign responses + pub builder_keys: BuilderKeys, + /// Maximum time allotted for the builder to respond to an API call. + /// If the response isn't ready by this time, an error will be returned + /// to the caller. 
+ pub max_api_waiting_time: Duration, + /// Interval at which the builder will optimistically increment its maximum + /// allowed block size in case it becomes lower than the protocol maximum. + pub max_block_size_increment_period: Duration, + /// Time the builder will wait for new transactions before answering an + /// `available_blocks` API call if the builder doesn't have any transactions at the moment + /// of the call. Should be less than [`Self::max_api_waiting_time`] + pub maximize_txn_capture_timeout: Duration, + /// (Approximate) duration over which included transaction hashes will be stored + /// by the builder for deduplication of incoming transactions. + pub txn_garbage_collect_duration: Duration, + /// Channel capacity for incoming transactions for a single builder state. + pub txn_channel_capacity: usize, + /// Capacity of cache storing information for transaction status API + pub tx_status_cache_capacity: usize, + /// Base fee; the sequencing fee for a block is calculated as block size × base fee + pub base_fee: u64, +} + +#[cfg(test)] +impl BuilderConfig { + pub(crate) fn test() -> Self { + use marketplace_builder_shared::testing::constants::*; + Self { + builder_keys: + ::generated_from_seed_indexed( + [0u8; 32], 42, + ), + max_api_waiting_time: TEST_API_TIMEOUT, + max_block_size_increment_period: TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + maximize_txn_capture_timeout: TEST_MAXIMIZE_TX_CAPTURE_TIMEOUT, + txn_garbage_collect_duration: TEST_INCLUDED_TX_GC_PERIOD, + txn_channel_capacity: TEST_CHANNEL_BUFFER_SIZE, + tx_status_cache_capacity: TEST_TX_STATUS_CACHE_CAPACITY, + base_fee: TEST_BASE_FEE, + } + } +} + +pub struct GlobalState { + /// Underlying coordinator, responsible for builder state lifecycle + pub(crate) coordinator: Arc>, + /// Keys that this builder will use to sign responses + pub(crate) builder_keys: BuilderKeys, + /// Stores blocks built by this builder + pub(crate) block_store: RwLock>, + /// Limits on block size. 
See [`BlockSizeLimits`] documentation for more details. + pub(crate) block_size_limits: BlockSizeLimits, + /// Number of DA nodes used in VID computation + pub(crate) num_nodes: AtomicUsize, + /// Instance state, used to construct new blocks + pub(crate) instance_state: Types::InstanceState, + /// See [`BuilderConfig::max_api_waiting_time`] + pub(crate) max_api_waiting_time: Duration, + /// See [`BuilderConfig::maximize_txn_capture_timeout`] + pub(crate) maximize_txn_capture_timeout: Duration, + /// See [`BuilderConfig::base_fee`] + pub(crate) base_fee: u64, +} + +impl GlobalState +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, + for<'a> >::Error: Display, +{ + pub fn new( + config: BuilderConfig, + instance_state: Types::InstanceState, + protocol_max_block_size: u64, + num_nodes: usize, + ) -> Arc { + Arc::new(Self { + coordinator: Arc::new(BuilderStateCoordinator::new( + config.txn_channel_capacity, + config.txn_garbage_collect_duration, + config.tx_status_cache_capacity, + )), + block_store: RwLock::new(BlockStore::new()), + block_size_limits: BlockSizeLimits::new( + protocol_max_block_size, + config.max_block_size_increment_period, + ), + num_nodes: num_nodes.into(), + builder_keys: config.builder_keys, + max_api_waiting_time: config.max_api_waiting_time, + maximize_txn_capture_timeout: config.maximize_txn_capture_timeout, + instance_state, + base_fee: config.base_fee, + }) + } + + /// Spawns an event loop handling HotShot events from the provided stream. + /// Returns a handle for the spawned task. 
+ pub fn start_event_loop( + self: Arc, + event_stream: impl Stream> + Unpin + Send + 'static, + ) -> JoinHandle> { + spawn(self.event_loop(event_stream)) + } + + /// Internal implementation of the event loop, drives the underlying coordinator + /// and runs hooks + async fn event_loop( + self: Arc, + mut event_stream: impl Stream> + Unpin + Send + 'static, + ) -> anyhow::Result<()> { + loop { + let Some(event) = event_stream.next().await else { + anyhow::bail!("Event stream ended"); + }; + + match event.event { + EventType::Error { error } => { + error!("Error event in HotShot: {:?}", error); + } + EventType::Transactions { transactions } => { + let this = Arc::clone(&self); + spawn(async move { + transactions + .into_iter() + .map(|txn| { + this.handle_transaction(ReceivedTransaction::new( + txn, + TransactionSource::Public, + )) + }) + .collect::>() + .collect::>() + .await; + }); + } + EventType::Decide { leaf_chain, .. } => { + let prune_cutoff = leaf_chain[0].leaf.view_number(); + + let coordinator = Arc::clone(&self.coordinator); + spawn(async move { coordinator.handle_decide(leaf_chain).await }); + + let this = Arc::clone(&self); + spawn(async move { this.block_store.write().await.prune(prune_cutoff) }); + } + EventType::DaProposal { proposal, .. } => { + let coordinator = Arc::clone(&self.coordinator); + spawn(async move { coordinator.handle_da_proposal(proposal.data).await }); + } + EventType::QuorumProposal { proposal, .. 
} => { + let coordinator = Arc::clone(&self.coordinator); + spawn(async move { coordinator.handle_quorum_proposal(proposal.data).await }); + } + _ => {} + } + } + } + + /// Consumes `self` and returns a `tide_disco` [`App`] with builder and private mempool APIs registered + pub fn into_app( + self: Arc, + ) -> Result, BuilderApiError>, AppError> { + let proxy = ProxyGlobalState(self); + let builder_api = define_api::, Types>(&Default::default())?; + + // TODO: Replace StaticVersion with proper constant when added in HotShot + let private_mempool_api = + submit_api::, Types, StaticVersion<0, 1>>(&Default::default())?; + + let mut app: App, BuilderApiError> = App::with_state(proxy); + + app.register_module(hotshot_types::constants::LEGACY_BUILDER_MODULE, builder_api)?; + + app.register_module("txn_submit", private_mempool_api)?; + + Ok(app) + } + + async fn handle_transaction(&self, tx: ReceivedTransaction) -> Result<(), Error> { + let len = tx.transaction.minimum_block_size(); + let max_tx_len = self.block_size_limits.max_block_size(); + if len > max_tx_len { + tracing::warn!(%tx.commit, %len, %max_tx_len, "Transaction too big"); + let error = Error::TxTooBig { len, max_tx_len }; + self.coordinator.update_txn_status( + &tx.commit, + TransactionStatus::Rejected { + reason: error.to_string(), + }, + ); + return Err(error); + } + self.coordinator.handle_transaction(tx).await + } + + async fn wait_for_builder_state( + &self, + state_id: &BuilderStateId, + check_period: Duration, + ) -> Result>, Error> { + loop { + match self.coordinator.lookup_builder_state(state_id).await { + BuilderStateLookup::Found(builder) => break Ok(builder), + BuilderStateLookup::Decided => { + return Err(Error::AlreadyDecided); + } + BuilderStateLookup::NotFound => { + sleep(check_period).await; + } + }; + } + } + + /// Build a block with provided builder state + /// + /// Returns None if there are no transactions to include + /// and we aren't prioritizing finalization for this builder state + 
pub(crate) async fn build_block( + &self, + builder_state: Arc>, + ) -> Result>, Error> { + let timeout_after = Instant::now() + self.maximize_txn_capture_timeout; + let sleep_interval = self.maximize_txn_capture_timeout / RETRY_LOOP_RESOLUTION; + + while Instant::now() <= timeout_after { + let queue_populated = builder_state.collect_txns(timeout_after).await; + + if queue_populated || Instant::now() + sleep_interval > timeout_after { + // we don't have time for another iteration + break; + } + + sleep(sleep_interval).await + } + + // If the parent block had transactions included and [`ALLOW_EMPTY_BLOCK_PERIOD`] views have not + // passed since, we will allow building empty blocks. This is done to allow for faster finalization + // of previous blocks that have had transactions included in them. + let should_prioritize_finalization = builder_state.parent_block_references.tx_count != 0 + && builder_state + .parent_block_references + .last_nonempty_view + .map(|nonempty_view| { + nonempty_view.saturating_sub(*builder_state.parent_block_references.view_number) + < ALLOW_EMPTY_BLOCK_PERIOD + }) + .unwrap_or(false); + + let builder: &Arc> = &builder_state; + let max_block_size = self.block_size_limits.max_block_size(); + + let transactions_to_include = { + let txn_queue = builder.txn_queue.read().await; + if txn_queue.is_empty() && !should_prioritize_finalization { + // Don't build an empty block + return Ok(None); + } + txn_queue + .iter() + .scan(0, |total_size, tx| { + let prev_size = *total_size; + *total_size += tx.min_block_size; + // We will include one transaction over our target block length + // if it's the first transaction in queue, otherwise we'd have a possible failure + // state where a single transaction larger than target block size is stuck in + // queue and we just build empty blocks forever + if *total_size >= max_block_size && prev_size != 0 { + None + } else { + // Note: we're going to map from ReceivedTransaction to + // Transaction it contains
later, so we can just clone + // the Arc here to reduce the time we hold the lock + Some(Arc::clone(tx)) + } + }) + .collect::>() + }; + + let (payload, metadata) = + match >::from_transactions( + transactions_to_include + .into_iter() + .map(|tx| tx.transaction.clone()), + &builder.validated_state, + &self.instance_state, + ) + .await + { + Ok((payload, metadata)) => (payload, metadata), + Err(error) => { + warn!(?error, "Failed to build block payload"); + return Err(Error::BuildBlock(error)); + } + }; + + // count the number of txns + let actual_txn_count = payload.num_transactions(&metadata); + let truncated = actual_txn_count == 0; + + // Payload is empty despite us checking that tx_queue isn't empty earlier. + // + // This means that the block was truncated due to *sequencer* block length + // limits, which are different from our `max_block_size`. There's no good way + // for us to check for this in advance, so we detect transactions too big for + // the sequencer indirectly, by observing that we passed some transactions + // to `>::from_transactions`, but + // it returned an empty block. + // Thus we deduce that the first transaction in our queue is too big to *ever* + // be included, because it alone goes over sequencer's block size limit. + if truncated { + builder.txn_queue.write().await.pop_front(); + if !should_prioritize_finalization { + return Ok(None); + } + } + + let encoded_txns: Vec = payload.encode().to_vec(); + let block_size: u64 = encoded_txns.len() as u64; + let offered_fee: u64 = self.base_fee * block_size; + + // Get the number of nodes stored while processing the `claim_block_with_num_nodes` request + // or upon initialization. 
+ let num_nodes = self.num_nodes.load(Ordering::Relaxed); + + let fut = async move { + let join_handle = tokio::task::spawn_blocking(move || { + precompute_vid_commitment(&encoded_txns, num_nodes) + }); + join_handle.await.unwrap() + }; + + info!( + builder_id = %builder.id(), + txn_count = actual_txn_count, + block_size, + "Built a block", + ); + + Ok(Some(BlockInfo { + block_payload: payload, + block_size, + metadata, + vid_data: WaitAndKeep::new(Box::pin(fut)), + offered_fee, + truncated, + })) + } + + #[instrument(skip_all, + fields(state_id = %state_id) + )] + pub(crate) async fn available_blocks_implementation( + &self, + state_id: BuilderStateId, + ) -> Result>, Error> { + let start = Instant::now(); + + let check_period = self.max_api_waiting_time / RETRY_LOOP_RESOLUTION; + let time_to_wait_for_matching_builder = + self.max_api_waiting_time / BUILDER_STATE_EXACT_MATCH_DIVISOR; + + let builder = match timeout( + time_to_wait_for_matching_builder, + self.wait_for_builder_state(&state_id, check_period), + ) + .await + { + Ok(Ok(builder)) => Some(builder), + Err(_) => { + // Timeout waiting for ideal state, get the highest view builder instead + warn!("Couldn't find the ideal builder state"); + self.coordinator.highest_view_builder().await + } + Ok(Err(e)) => { + // State already decided + let lowest_view = self.coordinator.lowest_view().await; + warn!( + ?lowest_view, + "get_available_blocks request for decided view" + ); + return Err(e); + } + }; + + let Some(builder) = builder else { + if let Some(cached_block) = self.block_store.read().await.get_cached(&state_id) { + return Ok(vec![cached_block.signed_response(&self.builder_keys)?]); + } else { + return Err(Error::NotFound); + }; + }; + + let build_block_timeout = self + .max_api_waiting_time + .saturating_sub(start.elapsed()) + .div_f32(1.1); + match timeout(build_block_timeout, self.build_block(builder)) + .await + .map_err(|_| Error::ApiTimeout) + { + // Success + Ok(Ok(Some(info))) => { + let block_id = 
BlockId { + hash: info.block_payload.builder_commitment(&info.metadata), + view: state_id.parent_view, + }; + + let response = info.signed_response(&self.builder_keys)?; + + { + let mut mutable_state = self.block_store.write().await; + mutable_state.update(state_id, block_id, info); + } + + Ok(vec![response]) + } + // Success, but no block: we don't have transactions and aren't prioritizing finalization + Ok(Ok(None)) => Ok(vec![]), + // Error building block, try to respond with a cached one as last-ditch attempt + Ok(Err(e)) | Err(e) => { + if let Some(cached_block) = self.block_store.read().await.get_cached(&state_id) { + Ok(vec![cached_block.signed_response(&self.builder_keys)?]) + } else { + Err(e) + } + } + } + } + + #[instrument(skip_all, + fields(block_id = %block_id) + )] + pub(crate) async fn claim_block_implementation( + &self, + block_id: BlockId, + ) -> Result, Error> { + let (block_payload, metadata) = { + // We store this read lock guard separately to make it explicit + // that this will end up holding a lock for the duration of this + // closure. + // + // Additionally, we clone the properties from the block_info that + // end up being cloned if found anyway. Since we know this already + // we can perform the clone here to avoid holding the lock for + // longer than needed. 
+ let mutable_state_read = self.block_store.read().await; + let block_info = mutable_state_read + .get_block(&block_id) + .ok_or(Error::NotFound)?; + + block_info.vid_data.start(); + ( + block_info.block_payload.clone(), + block_info.metadata.clone(), + ) + }; + + let (pub_key, sign_key) = self.builder_keys.clone(); + + // sign over the builder commitment, as the proposer can compute it based on provided block_payload + // and the metadata + let response_block_hash = block_payload.builder_commitment(&metadata); + let signature_over_builder_commitment = + ::BuilderSignatureKey::sign_builder_message( + &sign_key, + response_block_hash.as_ref(), + ) + .map_err(Error::Signing)?; + + let block_data = AvailableBlockData:: { + block_payload, + metadata, + signature: signature_over_builder_commitment, + sender: pub_key, + }; + + info!("Sending Claim Block data for {block_id}",); + Ok(block_data) + } + + #[instrument(skip_all, + fields(block_id = %block_id) + )] + pub(crate) async fn claim_block_header_input_implementation( + &self, + block_id: BlockId, + ) -> Result<(bool, AvailableBlockHeaderInput), Error> { + let metadata; + let offered_fee; + let truncated; + let vid_data; + { + // We store this read lock guard separately to make it explicit + // that this will end up holding a lock for the duration of this + // closure. + // + // Additionally, we clone the properties from the block_info that + // end up being cloned if found anyway. Since we know this already + // we can perform the clone here to avoid holding the lock for + // longer than needed.
+ let mutable_state_read_lock_guard = self.block_store.read().await; + let block_info = mutable_state_read_lock_guard + .get_block(&block_id) + .ok_or(Error::NotFound)?; + + metadata = block_info.metadata.clone(); + offered_fee = block_info.offered_fee; + truncated = block_info.truncated; + vid_data = block_info.vid_data.clone(); + }; + + let (vid_commitment, vid_precompute_data) = vid_data.resolve().await; + + // sign over the vid commitment + let signature_over_vid_commitment = + ::BuilderSignatureKey::sign_builder_message( + &self.builder_keys.1, + vid_commitment.as_ref(), + ) + .map_err(Error::Signing)?; + + let signature_over_fee_info = Types::BuilderSignatureKey::sign_fee( + &self.builder_keys.1, + offered_fee, + &metadata, + &vid_commitment, + ) + .map_err(Error::Signing)?; + + let response = AvailableBlockHeaderInput:: { + vid_commitment, + vid_precompute_data, + fee_signature: signature_over_fee_info, + message_signature: signature_over_vid_commitment, + sender: self.builder_keys.0.clone(), + }; + info!("Sending Claim Block Header Input response"); + Ok((truncated, response)) + } +} + +#[derive(derive_more::Deref, derive_more::DerefMut)] +#[deref(forward)] +#[deref_mut(forward)] +pub struct ProxyGlobalState(pub Arc>); + +/* +Handling Builder API responses +*/ +#[async_trait] +impl BuilderDataSource for ProxyGlobalState +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, + for<'a> >::Error: Display, +{ + #[tracing::instrument(skip_all)] + async fn available_blocks( + &self, + parent_block: &VidCommitment, + parent_view: u64, + sender: Types::SignatureKey, + signature: &::PureAssembledSignatureType, + ) -> Result>, BuildError> { + // verify the signature + if !sender.validate(signature, parent_block.as_ref()) { + warn!("Signature validation failed"); + return Err(Error::::SignatureValidation.into()); + } + + let state_id = BuilderStateId { + parent_commitment: *parent_block, + parent_view: 
Types::View::new(parent_view), + }; + + trace!("Requesting available blocks"); + + let available_blocks = timeout( + self.max_api_waiting_time, + self.available_blocks_implementation(state_id), + ) + .await + .map_err(|_| Error::::ApiTimeout)??; + + Ok(available_blocks) + } + + #[tracing::instrument(skip_all)] + async fn claim_block( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: Types::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError> { + // verify the signature + if !sender.validate(signature, block_hash.as_ref()) { + warn!("Signature validation failed"); + return Err(Error::::SignatureValidation.into()); + } + + let block_id = BlockId { + hash: block_hash.clone(), + view: Types::View::new(view_number), + }; + + trace!("Processing claim block request"); + + let block = timeout( + self.max_api_waiting_time, + self.claim_block_implementation(block_id), + ) + .await + .map_err(|_| Error::::ApiTimeout)??; + + Ok(block) + } + + #[tracing::instrument(skip_all)] + async fn claim_block_with_num_nodes( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: ::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + num_nodes: usize, + ) -> Result, BuildError> { + // Update the stored `num_nodes` with the given value, which will be used for VID computation. 
+ trace!( + new_num_nodes = num_nodes, + old_num_nodes = self.num_nodes.load(Ordering::Relaxed), + "Updating num_nodes" + ); + + self.num_nodes.store(num_nodes, Ordering::Relaxed); + + self.claim_block(block_hash, view_number, sender, signature) + .await + } + + #[tracing::instrument(skip_all)] + async fn claim_block_header_input( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: Types::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError> { + let start = Instant::now(); + // verify the signature + if !sender.validate(signature, block_hash.as_ref()) { + warn!("Signature validation failed in claim_block_header_input"); + return Err(Error::::SignatureValidation.into()); + } + + let block_id = BlockId { + hash: block_hash.clone(), + view: Types::View::new(view_number), + }; + + trace!("Processing claim_block_header_input request"); + + let (truncated, info) = timeout( + self.max_api_waiting_time, + self.claim_block_header_input_implementation(block_id), + ) + .await + .inspect_err(|_| { + // we can't keep up with this block size, reduce max block size + self.block_size_limits.decrement_block_size(); + }) + .map_err(|_| Error::::ApiTimeout)??; + + if self.max_api_waiting_time.saturating_sub(start.elapsed()) + > self.max_api_waiting_time / VID_RESPONSE_TARGET_MARGIN_DIVISOR + { + // Increase max block size + self.block_size_limits.try_increment_block_size(truncated); + } + + Ok(info) + } + + /// Returns the public key of the builder + #[tracing::instrument(skip_all)] + async fn builder_address( + &self, + ) -> Result<::BuilderSignatureKey, BuildError> { + Ok(self.builder_keys.0.clone()) + } +} + +#[async_trait] +impl AcceptsTxnSubmits for ProxyGlobalState +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, + for<'a> >::Error: Display, +{ + async fn submit_txns( + &self, + txns: Vec<::Transaction>, + ) -> Result::Transaction>>, 
BuildError> { + txns.into_iter() + .map(|txn| ReceivedTransaction::new(txn, TransactionSource::Private)) + .map(|txn| async { + let commit = txn.commit; + self.0.handle_transaction(txn).await?; + Ok(commit) + }) + .collect::>() + .try_collect() + .await + } + + async fn txn_status( + &self, + txn_hash: Commitment<::Transaction>, + ) -> Result { + Ok(self.coordinator.tx_status(&txn_hash)) + } +} + +#[async_trait] +impl ReadState for ProxyGlobalState { + type State = ProxyGlobalState; + + async fn read( + &self, + op: impl Send + for<'a> FnOnce(&'a Self::State) -> BoxFuture<'a, T> + 'async_trait, + ) -> T { + op(self).await + } +} diff --git a/crates/legacy/src/refactored/testing/basic.rs b/crates/legacy/src/refactored/testing/basic.rs new file mode 100644 index 00000000..f3b1733a --- /dev/null +++ b/crates/legacy/src/refactored/testing/basic.rs @@ -0,0 +1,370 @@ +use async_broadcast::broadcast; +use hotshot::types::{EventType, SignatureKey}; + +use hotshot_builder_api::v0_1::data_source::BuilderDataSource; +use hotshot_example_types::block_types::{TestBlockHeader, TestMetadata, TestTransaction}; +use hotshot_example_types::node_types::{TestTypes, TestVersions}; +use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; +use hotshot_types::data::{Leaf2, QuorumProposal2, ViewNumber}; +use hotshot_types::drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}; +use hotshot_types::event::LeafInfo; +use hotshot_types::simple_certificate::QuorumCertificate; +use hotshot_types::traits::block_contents::BlockHeader; +use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; +use hotshot_types::utils::BuilderCommitment; +use hotshot_types::vid::VidCommitment; +use marketplace_builder_shared::error::Error; +use marketplace_builder_shared::testing::consensus::SimulatedChainState; +use marketplace_builder_shared::testing::constants::{ + TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE, +}; +use tokio::time::sleep; +use 
tracing_test::traced_test; + +use crate::service::{BuilderConfig, GlobalState, ProxyGlobalState}; +use crate::testing::{assert_eq_generic_err, sign, TestServiceWrapper, MOCK_LEADER_KEYS}; +use std::sync::Arc; +use std::time::Duration; + +/// This test simulates consensus performing as expected and builder processing a number +/// of transactions +#[tokio::test] +#[traced_test] +async fn test_builder() { + // Number of views to simulate + const NUM_ROUNDS: usize = 5; + // Number of transactions to submit per round + const NUM_TXNS_PER_ROUND: usize = 4; + + let global_state = GlobalState::new( + BuilderConfig::test(), + TestInstanceState::default(), + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + ); + + let (event_stream_sender, event_stream) = broadcast(1024); + let test_service = + TestServiceWrapper::new(Arc::clone(&global_state), event_stream_sender.clone()).await; + global_state.start_event_loop(event_stream); + + // Transactions to send + let all_transactions = (0..NUM_ROUNDS) + .map(|round| { + (0..NUM_TXNS_PER_ROUND) + .map(|tx_num| TestTransaction::new(vec![round as u8, tx_num as u8])) + .collect::>() + }) + .collect::>(); + + // set up state to track between simulated consensus rounds + let mut prev_proposed_transactions: Option> = None; + let mut transaction_history = Vec::new(); + + let mut chain_state = SimulatedChainState::new(event_stream_sender.clone()); + + // Simulate NUM_ROUNDS of consensus. First we submit the transactions for this round to the builder, + // then construct DA and Quorum Proposals based on what we received from builder in the previous round + // and request a new bundle. 
+ #[allow(clippy::needless_range_loop)] // intent is clearer this way + for round in 0..NUM_ROUNDS { + // simulate transaction being submitted to the builder + test_service + .submit_transactions(all_transactions[round].clone()) + .await; + + // get transactions submitted in previous rounds, [] for genesis + // and simulate the block built from those + let builder_state_id = chain_state + .simulate_consensus_round(prev_proposed_transactions) + .await; + + // get response + let transactions = test_service.get_transactions(&builder_state_id).await; + + // in the next round we will use received transactions to simulate + // the block being proposed + prev_proposed_transactions = Some(transactions.clone()); + // save transactions to history + transaction_history.extend(transactions); + } + + // we should've served all transactions submitted, and in correct order + assert_eq!( + transaction_history, + all_transactions.into_iter().flatten().collect::>() + ); +} + +// This test checks that builder prunes saved blocks on decide +#[tokio::test] +#[traced_test] +async fn test_pruning() { + // Number of views to simulate + const NUM_ROUNDS: usize = 10; + // View number of decide event + const DECIDE_VIEW: u64 = 5; + // Number of transactions to submit per round + const NUM_TXNS_PER_ROUND: usize = 4; + + let global_state = GlobalState::new( + BuilderConfig::test(), + TestInstanceState::default(), + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + ); + + let (event_stream_sender, event_stream) = broadcast(1024); + let test_service = + TestServiceWrapper::new(Arc::clone(&global_state), event_stream_sender.clone()).await; + Arc::clone(&global_state).start_event_loop(event_stream); + + // Transactions to send + let all_transactions = (0..NUM_ROUNDS) + .map(|round| { + (0..NUM_TXNS_PER_ROUND) + .map(|tx_num| TestTransaction::new(vec![round as u8, tx_num as u8])) + .collect::>() + }) + .collect::>(); + + // set up state to track between simulated consensus rounds 
+ let mut prev_proposed_transactions: Option> = None; + let mut transaction_history = Vec::new(); + + let mut chain_state = SimulatedChainState::new(event_stream_sender.clone()); + + // Simulate NUM_ROUNDS of consensus. First we submit the transactions for this round to the builder, + // then construct DA and Quorum Proposals based on what we received from builder in the previous round + // and request a new bundle. + #[allow(clippy::needless_range_loop)] // intent is clearer this way + for round in 0..NUM_ROUNDS { + // All tiered maps shouldn't be pruned + assert_eq!( + *global_state + .block_store + .read() + .await + .blocks + .lowest_view() + .unwrap_or(ViewNumber::genesis()), + 0, + ); + assert_eq!( + *global_state + .block_store + .read() + .await + .block_cache + .lowest_view() + .unwrap_or(ViewNumber::genesis()), + 0, + ); + assert_eq!(*global_state.coordinator.lowest_view().await, 0); + + // simulate transaction being submitted to the builder + test_service + .submit_transactions(all_transactions[round].clone()) + .await; + + // get transactions submitted in previous rounds, [] for genesis + // and simulate the block built from those + let builder_state_id = chain_state + .simulate_consensus_round(prev_proposed_transactions) + .await; + + // get response + let transactions = test_service.get_transactions(&builder_state_id).await; + + // in the next round we will use received transactions to simulate + // the block being proposed + prev_proposed_transactions = Some(transactions.clone()); + // save transactions to history + transaction_history.extend(transactions); + } + + // Send a bogus decide event. The only thing we care about is the leaf's view number, + // everything else is boilerplate. 
+ + let mock_qc = + QuorumCertificate::genesis::(&Default::default(), &Default::default()) + .await + .to_qc2(); + let leaf = Leaf2::from_quorum_proposal(&QuorumProposal2 { + block_header: >::genesis( + &Default::default(), + Default::default(), + BuilderCommitment::from_bytes([]), + TestMetadata { + num_transactions: 0, + }, + ), + view_number: ViewNumber::new(DECIDE_VIEW), + justify_qc: mock_qc.clone(), + upgrade_certificate: None, + view_change_evidence: None, + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, + }); + event_stream_sender + .broadcast(hotshot::types::Event { + view_number: ViewNumber::new(NUM_ROUNDS as u64), + event: EventType::Decide { + leaf_chain: Arc::new(vec![LeafInfo { + leaf, + state: Arc::new(TestValidatedState::default()), + delta: None, + vid_share: None, + }]), + qc: Arc::new(mock_qc), + block_size: None, + }, + }) + .await + .unwrap(); + + // Give builder time to handle decide event + sleep(Duration::from_millis(100)).await; + + // Tiered maps should be pruned now + assert_eq!( + *global_state + .block_store + .read() + .await + .blocks + .lowest_view() + .unwrap(), + DECIDE_VIEW + ); + assert_eq!( + *global_state + .block_store + .read() + .await + .block_cache + .lowest_view() + .unwrap(), + DECIDE_VIEW + ); + assert_eq!(*global_state.coordinator.lowest_view().await, DECIDE_VIEW); +} + +#[tokio::test] +#[traced_test] +async fn test_signature_checks() { + let expected_signing_keys = &MOCK_LEADER_KEYS; + let wrong_signing_key = + <::SignatureKey as SignatureKey>::generated_from_seed_indexed( + [0; 32], 1, + ) + .1; + + // Signature over wrong data by expected signing key + let signature_over_bogus_data = sign(&[42]); + + // Sign correct data with unexpected key + let sign_with_wrong_key = |data| { + <::SignatureKey as SignatureKey>::sign(&wrong_signing_key, data) + .unwrap() + }; + + let builder_commitment = BuilderCommitment::from_bytes([]); + let vid_commitment = VidCommitment::default(); + + let global_state = 
GlobalState::::new( + BuilderConfig::test(), + TestInstanceState::default(), + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + ); + + let test_service = ProxyGlobalState(global_state); + + // Available blocks + { + // Verification should fail if signature is over incorrect data + let err = test_service + .available_blocks( + &vid_commitment, + 0, + expected_signing_keys.0, + &signature_over_bogus_data, + ) + .await + .expect_err("Signature verification should've failed"); + + assert_eq_generic_err(err, Error::SignatureValidation); + + // Verification should also fail if signature is over correct data but by incorrect key + let err = test_service + .available_blocks( + &vid_commitment, + 0, + expected_signing_keys.0, + &sign_with_wrong_key(vid_commitment.as_ref()), + ) + .await + .expect_err("Signature verification should've failed"); + + assert_eq_generic_err(err, Error::SignatureValidation); + } + + // Claim block + { + // Verification should fail if signature is over incorrect data + let err = test_service + .claim_block( + &builder_commitment, + 0, + expected_signing_keys.0, + &signature_over_bogus_data, + ) + .await + .expect_err("Signature verification should've failed"); + + assert_eq_generic_err(err, Error::SignatureValidation); + + // Verification should also fail if signature is over correct data but by incorrect key + let err = test_service + .claim_block( + &builder_commitment, + 0, + expected_signing_keys.0, + &sign_with_wrong_key(builder_commitment.as_ref()), + ) + .await + .expect_err("Signature verification should've failed"); + + assert_eq_generic_err(err, Error::SignatureValidation); + } + + // Claim block header input + { + // Verification should fail if signature is over incorrect data + let err = test_service + .claim_block_header_input( + &builder_commitment, + 0, + expected_signing_keys.0, + &signature_over_bogus_data, + ) + .await + .expect_err("Signature verification should've failed"); + + assert_eq_generic_err(err, 
Error::SignatureValidation); + + // Verification should also fail if signature is over correct data but by incorrect key + let err = test_service + .claim_block_header_input( + &builder_commitment, + 0, + expected_signing_keys.0, + &sign_with_wrong_key(builder_commitment.as_ref()), + ) + .await + .expect_err("Signature verification should've failed"); + + assert_eq_generic_err(err, Error::SignatureValidation); + } +} diff --git a/crates/legacy/src/refactored/testing/block_size.rs b/crates/legacy/src/refactored/testing/block_size.rs new file mode 100644 index 00000000..15df76f9 --- /dev/null +++ b/crates/legacy/src/refactored/testing/block_size.rs @@ -0,0 +1,174 @@ +use async_broadcast::broadcast; +use committable::Committable; +use hotshot_builder_api::v0_1::builder::TransactionStatus; +use hotshot_example_types::block_types::TestTransaction; +use hotshot_example_types::state_types::TestInstanceState; +use hotshot_types::data::ViewNumber; +use hotshot_types::traits::node_implementation::ConsensusTime; +use hotshot_types::vid::VidCommitment; +use marketplace_builder_shared::block::{BlockId, BuilderStateId}; +use marketplace_builder_shared::testing::consensus::SimulatedChainState; +use marketplace_builder_shared::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; +use tracing_test::traced_test; + +use crate::block_size_limits::BlockSizeLimits; +use crate::service::{BuilderConfig, GlobalState}; +use crate::testing::TestServiceWrapper; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::time::Duration; + +/// This tests simulates size limits being decreased lower than our capacity +/// and then checks that size limits return to protocol maximum over time +#[tokio::test] +#[traced_test] +async fn block_size_increment() { + // Number of views we'll need to simulate to reach protocol max block size + // Basically compound interest formula solved for time + let num_rounds: u64 = + (((PROTOCOL_MAX_BLOCK_SIZE / BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR) as 
f64).ln() + / (1.0 + 1f64 / BlockSizeLimits::MAX_BLOCK_SIZE_CHANGE_DIVISOR as f64).ln()) + .ceil() as u64; + + // Max block size for this test. Relatively low + // so that we don't spend a lot of rounds in this test + // in this test + const PROTOCOL_MAX_BLOCK_SIZE: u64 = BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR * 3; + + let mut cfg = BuilderConfig::test(); + // We don't want to delay increments for this test + cfg.max_block_size_increment_period = Duration::ZERO; + let global_state = GlobalState::new( + cfg, + TestInstanceState::default(), + PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + ); + + // Manually set the limits + global_state.block_size_limits.mutable_state.store( + crate::block_size_limits::MutableState { + max_block_size: BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR, + last_block_size_increment: coarsetime::Instant::now().as_ticks(), + }, + Ordering::Relaxed, + ); + + let (event_stream_sender, event_stream) = broadcast(1024); + let test_service = + TestServiceWrapper::new(Arc::clone(&global_state), event_stream_sender.clone()).await; + Arc::clone(&global_state).start_event_loop(event_stream); + + // set up state to track between simulated consensus rounds + let mut chain_state = SimulatedChainState::new(event_stream_sender.clone()); + + // Simulate NUM_ROUNDS of consensus. First we submit the transactions for this round to the builder, + // then construct DA and Quorum Proposals based on what we received from builder in the previous round + // and request a new bundle. 
+ #[allow(clippy::needless_range_loop)] // intent is clearer this way + for round in 0..num_rounds { + // We should still be climbing + assert_ne!( + global_state + .block_size_limits + .mutable_state + .load(Ordering::Relaxed) + .max_block_size, + PROTOCOL_MAX_BLOCK_SIZE, + "On round {round}/{num_rounds} we shouldn't be back to PROTOCOL_MAX_BLOCK_SIZE yet" + ); + + // simulate transaction being submitted to the builder + test_service + .submit_transactions(vec![TestTransaction::default()]) + .await; + + // get transactions submitted in previous rounds, [] for genesis + // and simulate the block built from those + let builder_state_id = chain_state.simulate_consensus_round(None).await; + + // Get response. Called through + let mut available_states = test_service + .get_available_blocks(&builder_state_id) + .await + .unwrap(); + + if let Some(block_info) = available_states.pop() { + let block_id = BlockId { + hash: block_info.block_hash, + view: builder_state_id.parent_view, + }; + // Get header input, this should trigger block size limits increment + test_service + .claim_block_header_input(&block_id) + .await + .expect("Failed to claim header input"); + } + } + + // We should've returned to protocol max block size + assert_eq!( + global_state + .block_size_limits + .mutable_state + .load(Ordering::Relaxed) + .max_block_size, + PROTOCOL_MAX_BLOCK_SIZE + ) +} + +#[tokio::test] +#[traced_test] +async fn huge_transactions() { + // Max block size for this test. 
As low as possible + // so that we don't spend a lot of time in this test + const PROTOCOL_MAX_BLOCK_SIZE: u64 = BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR; + // Number of big transactions to send to the builder + const N_BIG_TRANSACTIONS: usize = 10; + + let global_state = GlobalState::new( + BuilderConfig::test(), + TestInstanceState::default(), + PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + ); + let (event_stream_sender, event_stream) = broadcast(1024); + let test_service = + TestServiceWrapper::new(Arc::clone(&global_state), event_stream_sender).await; + Arc::clone(&global_state).start_event_loop(event_stream); + + let almost_too_big = TestTransaction::new(vec![0u8; PROTOCOL_MAX_BLOCK_SIZE as usize]); + let too_big = TestTransaction::new(vec![0u8; PROTOCOL_MAX_BLOCK_SIZE as usize + 1]); + let too_big_commitment = too_big.commit(); + + test_service + .submit_transactions_private(vec![almost_too_big.clone(); N_BIG_TRANSACTIONS]) + .await + .unwrap(); + + test_service + .submit_transactions_private(vec![too_big]) + .await + .unwrap_err(); + + // Should also update the tx status + assert!(matches!( + test_service + .proxy_global_state + .coordinator + .tx_status(&too_big_commitment), + TransactionStatus::Rejected { .. 
} + )); + + // Builder shouldn't exceed the maximum block size, so transactions + // should be included one-by-one + assert_eq!( + vec![almost_too_big], + test_service + .get_transactions(&BuilderStateId { + parent_view: ViewNumber::genesis(), + parent_commitment: VidCommitment::default(), + }) + .await + ) +} diff --git a/crates/legacy/src/refactored/testing/finalization.rs b/crates/legacy/src/refactored/testing/finalization.rs new file mode 100644 index 00000000..13b1d216 --- /dev/null +++ b/crates/legacy/src/refactored/testing/finalization.rs @@ -0,0 +1,176 @@ +use async_broadcast::broadcast; +use tracing_test::traced_test; + +use hotshot_example_types::block_types::TestTransaction; +use hotshot_example_types::state_types::TestInstanceState; +use marketplace_builder_shared::testing::consensus::SimulatedChainState; +use marketplace_builder_shared::testing::constants::{ + TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE, +}; + +use crate::service::{BuilderConfig, GlobalState, ALLOW_EMPTY_BLOCK_PERIOD}; +use crate::testing::TestServiceWrapper; +use std::sync::Arc; + +// How many times consensus will re-try getting available blocks +const NUM_RETRIES: usize = 5; + +/// [test_empty_block_rate] is a test to ensure that if we don't have any +/// transactions being submitted, that the builder will continue it's current +/// behavior of not proposing empty blocks. +/// +/// |> Note: this test simulates how consensus interacts with the Builder in a +/// |> very basic way. When consensus asks for available blocks, and the +/// |> Builder returns an error that indicates that it does not have any blocks +/// |> to propose, consensus will retry a few times before giving up. As a +/// |> result the number of times that consensus has to ask the Builder for +/// |> block is an integral part of this test. 
+#[tokio::test] +async fn test_empty_block_rate() { + // Number of views to simulate + const NUM_ROUNDS: u64 = 5; + const { + assert!(NUM_ROUNDS > ALLOW_EMPTY_BLOCK_PERIOD); + } + + let global_state = GlobalState::new( + BuilderConfig::test(), + TestInstanceState::default(), + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + ); + + let (event_stream_sender, event_stream) = broadcast(1024); + let test_service = + TestServiceWrapper::new(Arc::clone(&global_state), event_stream_sender.clone()).await; + global_state.start_event_loop(event_stream); + + let mut chain_state = SimulatedChainState::new(event_stream_sender); + + // Simulate NUM_ROUNDS of consensus. First we submit the transactions for this round to the builder, + // then construct DA and Quorum Proposals based on what we received from builder in the previous round + // and request a new bundle. + #[allow(clippy::needless_range_loop)] // intent is clearer this way + for _ in 0..NUM_ROUNDS { + let builder_state_id = chain_state.simulate_consensus_round(None).await; + + // get response + for _ in 0..NUM_RETRIES { + let available_blocks = test_service + .get_available_blocks(&builder_state_id) + .await + .unwrap(); + assert!( + available_blocks.is_empty(), + "Builder shouldn't be building empty block without recent blocks with transactions" + ); + } + } +} + +/// [test_eager_block_rate] is a test that ensures that the builder will propose +/// empty blocks, if consensus indicates a proposal included transactions. +/// +/// It checks initially that it does not propose any empty blocks in round 0. +/// It checks that it proposes a block with transactions in round 1, which +/// gets included by consensus. +/// It then checks that the next `allow_empty_block_period` rounds return empty +/// blocks without the need to retry. +/// It then checks that the remaining rounds will not propose any empty blocks. 
+/// +/// |> Note: this test simulates how consensus interacts with the Builder in a +/// |> very basic way. When consensus asks for available blocks, and the +/// |> Builder returns an error that indicates that it does not have any blocks +/// |> to propose, consensus will retry a few times before giving up. As a +/// |> result the number of times that consensus has to ask the Builder for +/// |> block is an integral part of this test. +#[tokio::test] +#[traced_test] +async fn test_eager_block_rate() { + let global_state = GlobalState::new( + BuilderConfig::test(), + TestInstanceState::default(), + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + ); + + let (event_stream_sender, event_stream) = broadcast(1024); + let test_service = + TestServiceWrapper::new(Arc::clone(&global_state), event_stream_sender.clone()).await; + global_state.start_event_loop(event_stream); + + let mut chain_state = SimulatedChainState::new(event_stream_sender); + + // First round, shouldn't be building empty blocks + { + let builder_state_id = chain_state.simulate_consensus_round(None).await; + + // get response + for _ in 0..NUM_RETRIES { + let available_blocks = test_service + .get_available_blocks(&builder_state_id) + .await + .unwrap(); + assert!( + available_blocks.is_empty(), + "Builder shouldn't be building empty block without recent blocks with transactions" + ); + } + } + + // Second round, simulate a transaction has been proposed. Now builder should be proposing empty blocks. 
+ { + let builder_state_id = chain_state + .simulate_consensus_round(Some(vec![TestTransaction::default()])) + .await; + + // get response + for _ in 0..NUM_RETRIES { + let available_blocks = test_service + .get_available_blocks(&builder_state_id) + .await + .unwrap(); + assert_eq!( + available_blocks.first().unwrap().block_size, + 0, + "Builder should be building empty blocks: we've had a transaction included this round" + ); + } + } + + // Second round, simulate a transaction has been proposed. Now builder should be proposing empty blocks. + for _ in 0..ALLOW_EMPTY_BLOCK_PERIOD { + let builder_state_id = chain_state + .simulate_consensus_round(Some(vec![TestTransaction::default()])) + .await; + + // get response + for _ in 0..NUM_RETRIES { + let available_blocks = test_service + .get_available_blocks(&builder_state_id) + .await + .unwrap(); + assert_eq!( + available_blocks.first().unwrap().block_size, + 0, + "Builder should be building empty blocks: we've had a transaction included recently" + ); + } + } + + { + let builder_state_id = chain_state.simulate_consensus_round(None).await; + + // get response + for _ in 0..NUM_RETRIES { + let available_blocks = test_service + .get_available_blocks(&builder_state_id) + .await + .unwrap(); + assert!( + available_blocks.is_empty(), + "Builder shouldn't be building empty block without recent blocks with transactions" + ); + } + } +} diff --git a/crates/legacy/src/refactored/testing/integration.rs b/crates/legacy/src/refactored/testing/integration.rs new file mode 100644 index 00000000..1bfe1f9b --- /dev/null +++ b/crates/legacy/src/refactored/testing/integration.rs @@ -0,0 +1,203 @@ +//! This module implements interfaces necessary to run legacy builder +//! in HotShot testing harness. 
+ +use std::{collections::HashMap, fmt::Display, marker::PhantomData, sync::Arc}; + +use async_trait::async_trait; +use hotshot::types::SignatureKey; +use hotshot_testing::{ + block_builder::{BuilderTask, TestBuilderImplementation}, + test_builder::BuilderChange, +}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::NodeType}; +use marketplace_builder_shared::testing::constants::TEST_PROTOCOL_MAX_BLOCK_SIZE; +use tagged_base64::TaggedBase64; +use tokio::spawn; +use url::Url; +use vbs::version::StaticVersion; + +use crate::service::{BuilderConfig, GlobalState}; + +/// Testing configuration for legacy builder +struct TestLegacyBuilderConfig +where + Types: NodeType, +{ + _marker: PhantomData, +} + +impl Default for TestLegacyBuilderConfig +where + Types: NodeType, +{ + fn default() -> Self { + Self { + _marker: PhantomData, + } + } +} + +/// [`TestBuilderImplementation`] for legacy builder. +/// Passed as a generic parameter to [`TestRunner::run_test`], it is be used +/// to instantiate builder API and builder task. +struct LegacyBuilderImpl {} + +#[async_trait] +impl TestBuilderImplementation for LegacyBuilderImpl +where + Types: NodeType, + Types::InstanceState: Default, + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, + for<'a> >::Error: Display, +{ + type Config = TestLegacyBuilderConfig; + + /// This is mostly boilerplate to instantiate and start [`ProxyGlobalState`] APIs and initial [`BuilderState`]'s event loop. + /// [`BuilderTask`] it returns will be injected into consensus runtime by HotShot testing harness and + /// will forward transactions from hotshot event stream to the builder. 
+ async fn start( + num_nodes: usize, + url: Url, + _config: >::Config, + _changes: HashMap, + ) -> Box> { + let service = GlobalState::new( + BuilderConfig::test(), + Types::InstanceState::default(), + TEST_PROTOCOL_MAX_BLOCK_SIZE, + num_nodes, + ); + + // Create tide-disco app based on global state + let app = Arc::clone(&service) + .into_app() + .expect("Failed to create builder tide-disco app"); + + let url_clone = url.clone(); + + spawn(app.serve(url_clone, StaticVersion::<0, 1> {})); + + // Return the global state as a task that will be later started + // by the test harness with event stream from one of HS nodes + Box::new(LegacyBuilderTask { service }) + } +} + +/// Legacy builder task. Stores all the necessary information to run builder service +struct LegacyBuilderTask +where + Types: NodeType, +{ + service: Arc>, +} + +impl BuilderTask for LegacyBuilderTask +where + Types: NodeType, + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, + for<'a> >::Error: Display, +{ + fn start( + self: Box, + stream: Box< + dyn futures::prelude::Stream> + + std::marker::Unpin + + Send + + 'static, + >, + ) { + self.service.start_event_loop(stream); + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use crate::testing::integration::LegacyBuilderImpl; + use marketplace_builder_shared::testing::{ + generation::{self, TransactionGenerationConfig}, + run_test, + validation::BuilderValidationConfig, + }; + + use hotshot_example_types::node_types::TestVersions; + use hotshot_example_types::node_types::{MemoryImpl, TestTypes}; + use hotshot_macros::cross_tests; + use hotshot_testing::{ + completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, + overall_safety_task::OverallSafetyPropertiesDescription, + test_builder::TestDescription, + }; + + #[tokio::test(flavor = "multi_thread")] + #[tracing::instrument] + #[ignore = "slow"] + async fn basic_test() { + let num_successful_views = 45; + let 
min_txns_per_view = 5; + + run_test::( + TestDescription { + txn_description: hotshot_testing::txn_task::TxnTaskDescription::RoundRobinTimeBased( + Duration::MAX, + ), + completion_task_description: + CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(60), + }, + ), + overall_safety_properties: OverallSafetyPropertiesDescription { + num_successful_views, + num_failed_views: 5, + ..Default::default() + }, + ..TestDescription::default() + }, + BuilderValidationConfig { + expected_txn_num: num_successful_views * min_txns_per_view, + }, + TransactionGenerationConfig { + strategy: generation::GenerationStrategy::Random { + min_per_view: min_txns_per_view, + max_per_view: 10, + min_tx_size: 128, + max_tx_size: 1280, + }, + endpoints: vec![], + }, + ) + .await; + } + + cross_tests!( + TestName: example_cross_test, + Impls: [MemoryImpl], + BuilderImpls: [LegacyBuilderImpl], + Types: [TestTypes], + Versions: [TestVersions], + Ignore: true, + Metadata: { + TestDescription { + validate_transactions : hotshot_testing::test_builder::nonempty_block_threshold((90,100)), + txn_description : hotshot_testing::txn_task::TxnTaskDescription::RoundRobinTimeBased(Duration::from_millis(10)), + completion_task_description : CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_secs(120), + }, + ), + overall_safety_properties: OverallSafetyPropertiesDescription { + num_successful_views: 50, + num_failed_views: 5, + ..Default::default() + }, + ..Default::default() + } + }, + ); +} diff --git a/crates/legacy/src/refactored/testing/mod.rs b/crates/legacy/src/refactored/testing/mod.rs new file mode 100644 index 00000000..3be68736 --- /dev/null +++ b/crates/legacy/src/refactored/testing/mod.rs @@ -0,0 +1,190 @@ +// I can't "mark this sync", clippy +#![allow(clippy::declare_interior_mutable_const)] +#![allow(clippy::borrow_interior_mutable_const)] + 
+use std::cell::LazyCell; +use std::sync::Arc; +use std::time::Duration; + +use async_broadcast::Sender; +use committable::Commitment; +use hotshot::rand::{thread_rng, Rng}; +use hotshot::types::{BLSPubKey, Event, EventType, SignatureKey}; +use hotshot_builder_api::v0_1::block_info::AvailableBlockHeaderInput; +use hotshot_builder_api::v0_1::builder::BuildError; +use hotshot_builder_api::v0_1::data_source::AcceptsTxnSubmits; +use hotshot_builder_api::v0_1::{block_info::AvailableBlockInfo, data_source::BuilderDataSource}; +use hotshot_example_types::block_types::TestTransaction; +use hotshot_example_types::node_types::TestTypes; +use hotshot_task_impls::builder::v0_1::BuilderClient; +use hotshot_types::data::ViewNumber; +use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; +use marketplace_builder_shared::block::{BlockId, BuilderStateId}; +use marketplace_builder_shared::error::Error; +use marketplace_builder_shared::utils::BuilderKeys; +use tokio::spawn; +use url::Url; +use vbs::version::StaticVersion; + +use crate::service::{GlobalState, ProxyGlobalState}; + +mod basic; +mod block_size; +mod finalization; +mod integration; + +const MOCK_LEADER_KEYS: LazyCell> = + LazyCell::new(|| BLSPubKey::generated_from_seed_indexed([0; 32], 0)); + +fn sign( + data: &[u8], +) -> <::SignatureKey as SignatureKey>::PureAssembledSignatureType { + <::SignatureKey as SignatureKey>::sign(&MOCK_LEADER_KEYS.1, data) + .unwrap() +} + +// We need to extract the error strings by hand because BuildError doesn't implement Eq +fn assert_eq_generic_err(err: BuildError, expected_err: Error) { + let BuildError::Error(expected_err_str) = expected_err.into() else { + panic!("Unexpected conversion of Error to BuildError"); + }; + println!("{:#?}", err); + let BuildError::Error(err_str) = err else { + panic!("Unexpected BuildError by builder"); + }; + assert_eq!(expected_err_str, err_str); +} + +/// Provides convenience functions on top of service API, +/// as well as routing 
requests through as many API surfaces as +/// possible to shake out more potential bugs +struct TestServiceWrapper { + event_sender: Sender>, + proxy_global_state: ProxyGlobalState, + client: BuilderClient, +} + +impl TestServiceWrapper { + async fn new( + global_state: Arc>, + event_stream_sender: Sender>, + ) -> Self { + let port = portpicker::pick_unused_port().unwrap(); + let url: Url = format!("http://localhost:{port}").parse().unwrap(); + let app = Arc::clone(&global_state).into_app().unwrap(); + spawn(app.serve(url.clone(), StaticVersion::<0, 1> {})); + let client = BuilderClient::new(url); + assert!(client.connect(Duration::from_secs(1)).await); + Self { + event_sender: event_stream_sender, + proxy_global_state: ProxyGlobalState(global_state), + client, + } + } +} + +impl TestServiceWrapper { + /// Proxies request to get available blocks to underlying [`ProxyGlobalState`], + /// taking care of signing + pub(crate) async fn get_available_blocks( + &self, + state_id: &BuilderStateId, + ) -> Result>, BuildError> { + self.proxy_global_state + .available_blocks( + &state_id.parent_commitment, + *state_id.parent_view, + MOCK_LEADER_KEYS.0, + &BLSPubKey::sign(&MOCK_LEADER_KEYS.1, state_id.parent_commitment.as_ref()).unwrap(), + ) + .await + } + + /// Proxies request to get claim block header input to underlying [`ProxyGlobalState`], + /// taking care of signing + pub(crate) async fn claim_block_header_input( + &self, + block_id: &BlockId, + ) -> Result, BuildError> { + self.proxy_global_state + .claim_block_header_input( + &block_id.hash, + *block_id.view, + MOCK_LEADER_KEYS.0, + &BLSPubKey::sign(&MOCK_LEADER_KEYS.1, block_id.hash.as_ref()).unwrap(), + ) + .await + } + + /// Combines get available block request and claim block request to + /// get transactions a leader would've received for the round. + /// + /// Panics on errors. 
+ /// + /// Requests are routed through HotShot's HTTP API client to check + /// compatibility + pub(crate) async fn get_transactions( + &self, + state_id: &BuilderStateId, + ) -> Vec { + let mut available_states = self + .client + .available_blocks( + state_id.parent_commitment, + *state_id.parent_view, + MOCK_LEADER_KEYS.0, + &BLSPubKey::sign(&MOCK_LEADER_KEYS.1, state_id.parent_commitment.as_ref()).unwrap(), + ) + .await + .unwrap(); + + let Some(block_info) = available_states.pop() else { + return vec![]; + }; + + // Get block for its transactions + let block = self + .client + .claim_block( + block_info.block_hash.clone(), + *state_id.parent_view, + MOCK_LEADER_KEYS.0, + &BLSPubKey::sign(&MOCK_LEADER_KEYS.1, block_info.block_hash.as_ref()).unwrap(), + ) + .await + .unwrap(); + block.block_payload.transactions + } + + /// Emulates submission of transactions through HotShot gossiping + pub(crate) async fn submit_transactions_public(&self, transactions: Vec) { + self.event_sender + .broadcast(Event { + // This field is never actually used in the builder, so we can just use + // an arbitrary value + view_number: ViewNumber::genesis(), + event: EventType::Transactions { transactions }, + }) + .await + .unwrap(); + } + + /// Submits transactions through private mempool interface of [`ProxyGlobalState`] + pub(crate) async fn submit_transactions_private( + &self, + transactions: Vec, + ) -> Result>, BuildError> { + self.proxy_global_state.submit_txns(transactions).await + } + + /// Submits transactions randomly either through public or private mempool + pub(crate) async fn submit_transactions(&self, transactions: Vec) { + if thread_rng().gen() { + self.submit_transactions_public(transactions).await + } else { + self.submit_transactions_private(transactions) + .await + .unwrap(); + } + } +} diff --git a/crates/marketplace/Cargo.toml b/crates/marketplace/Cargo.toml index b3e1c28d..e7eb2ed1 100644 --- a/crates/marketplace/Cargo.toml +++ b/crates/marketplace/Cargo.toml @@ 
-6,8 +6,8 @@ edition = { workspace = true } [dependencies] marketplace-builder-shared = { path = "../shared" } -anyhow = "1" -async-broadcast = "0.7" +anyhow = { workspace = true } +async-broadcast = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } committable = { workspace = true } @@ -16,7 +16,6 @@ futures = { workspace = true } hotshot = { workspace = true } hotshot-builder-api = { workspace = true } hotshot-types = { workspace = true } -lru = { workspace = true } sha2 = { workspace = true } tagged-base64 = { workspace = true } tide-disco = { workspace = true } @@ -29,8 +28,9 @@ vbs = { workspace = true } hotshot-example-types = { workspace = true } hotshot-macros = { workspace = true } hotshot-testing = { workspace = true } -num_cpus = "1.16" -url = "2.5" +num_cpus = { workspace = true } +tracing-test = { workspace = true } +url = { workspace = true } [lints] workspace = true diff --git a/crates/marketplace/src/hooks.rs b/crates/marketplace/src/hooks.rs index 761218ff..9c787fb4 100644 --- a/crates/marketplace/src/hooks.rs +++ b/crates/marketplace/src/hooks.rs @@ -4,8 +4,29 @@ use async_trait::async_trait; use hotshot::types::Event; use hotshot_types::traits::node_implementation::NodeType; +/// A trait for hooks into the builder service. Used to further customize +/// builder behaviour in ways not possible in builder core. +/// If you don't need such customisation, use [`NoHooks`]. 
+/// +/// A simple example filtering incoming transactions based on some imaginary +/// application-specific magic byte: +/// ```rust +/// # type MyTypes = hotshot_example_types::node_types::TestTypes; +/// # type MyTransaction = hotshot_example_types::block_types::TestTransaction; +/// use marketplace_builder_core::hooks::BuilderHooks; +/// struct MyBuilderHooks { magic: u8 }; +/// +/// #[async_trait::async_trait] +/// impl BuilderHooks for MyBuilderHooks { +/// async fn process_transactions(&self, transactions: Vec) -> Vec { +/// transactions.into_iter().filter(|tx| tx.bytes()[0] == self.magic).collect() +/// } +/// } +/// ``` #[async_trait] pub trait BuilderHooks: Sync + Send + 'static { + /// Implement this to process transactions before + /// they'll be passed on to the builder. #[inline(always)] async fn process_transactions( &self, @@ -14,6 +35,13 @@ pub trait BuilderHooks: Sync + Send + 'static { transactions } + /// Handle any hotshot event _before_ the builder event loop handles it. + /// Event handling is done sequentially, i.e. you can rely on the fact + /// that the builder will process this event _after_ the hooks have finished + /// processing it. Accordingly, if this property is not important to you, + /// and especially if you're doing anything involved with this hook + /// it is advisable to spawn a task doing the actual work and return, + /// so that builder's event loop isn't blocked for too long. #[inline(always)] async fn handle_hotshot_event(&self, _event: &Event) {} } @@ -38,6 +66,7 @@ where } } +/// Hooks that do nothing pub struct NoHooks(pub PhantomData); impl BuilderHooks for NoHooks {} diff --git a/crates/marketplace/src/lib.rs b/crates/marketplace/src/lib.rs index 9bacf40d..969757f0 100644 --- a/crates/marketplace/src/lib.rs +++ b/crates/marketplace/src/lib.rs @@ -1,19 +1,15 @@ -// Copyright (c) 2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot Builder Protocol. 
-// - -// Builder Phase 1 -// It mainly provides three API services to hotshot proposers: -// 1. Serves a proposer(leader)'s request to provide blocks information -// 2. Serves a proposer(leader)'s request to provide the full blocks information -// 3. Serves a proposer(leader)'s request to provide the block header information - -// It also provides one API services external users: -// 1. Serves a user's request to submit a private transaction +//! Marketplace builder +//! It mainly provides this API service to hotshot proposers: +//! 1. Serves a proposer(leader)'s request to provide a bundle of transactions +//! +//! It also provides one API service to external users: +//! 1. Serves a user's request to submit a private transaction +#![cfg_attr(coverage_nightly, feature(coverage_attribute))] pub mod hooks; pub mod service; // tracking the testing #[cfg(test)] +#[cfg_attr(coverage_nightly, coverage(off))] pub mod testing; diff --git a/crates/marketplace/src/service.rs b/crates/marketplace/src/service.rs index 46608bc5..9fd2c954 100644 --- a/crates/marketplace/src/service.rs +++ b/crates/marketplace/src/service.rs @@ -4,6 +4,7 @@ use marketplace_builder_shared::{ block::{BuilderStateId, ReceivedTransaction, TransactionSource}, coordinator::{BuilderStateCoordinator, BuilderStateLookup}, state::BuilderState, + utils::BuilderKeys, }; pub use async_broadcast::{broadcast, RecvError, TryRecvError}; @@ -44,6 +45,30 @@ pub use marketplace_builder_shared::utils::EventServiceStream; use crate::hooks::BuilderHooks; +/// Configuration to initialize the builder +#[derive(Debug, Clone)] +pub struct BuilderConfig { + /// Keys that this builder will use to sign responses + pub builder_keys: BuilderKeys, + /// Maximum time allotted for the builder to respond to an API call. + /// If the response isn't ready by this time, an error will be returned + /// to the caller. 
+ pub api_timeout: Duration, + /// Time the builder will wait for new transactions before answering an + /// `available_blocks` API call if the builder doesn't have any transactions at the moment + /// of the call. Should be less than [`Self::api_timeout`] + pub tx_capture_timeout: Duration, + /// (Approximate) duration over which included transaction hashes will be stored + /// by the builder for deduplication of incoming transactions. + pub txn_garbage_collect_duration: Duration, + /// Channel capacity for incoming transactions for a single builder state. + pub txn_channel_capacity: usize, + /// Capacity of cache storing information for transaction status API + pub tx_status_cache_capacity: usize, + /// Base fee; the sequencing fee for a bundle is calculated as bundle size × base fee + pub base_fee: u64, +} + /// The main type implementing the marketplace builder. pub struct GlobalState where @@ -53,11 +78,7 @@ where /// Coordinator we'll rely on to manage builder states coordinator: Arc>, /// Identity keys for the builder - builder_keys: ( - Types::BuilderSignatureKey, // pub key - <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, // private key - ), - + builder_keys: BuilderKeys, /// Maximum time allotted to wait for bundle before returning an error api_timeout: Duration, /// Maximum time we're allowed to expend waiting for more transactions to @@ -65,9 +86,29 @@ where tx_capture_timeout: Duration, /// Base fee per bundle byte base_fee: u64, + /// See [`BuilderHooks`] for more information hooks: Arc, } +#[cfg(test)] +impl BuilderConfig { + pub(crate) fn test() -> Self { + use marketplace_builder_shared::testing::constants::*; + Self { + builder_keys: + ::generated_from_seed_indexed( + [0u8; 32], 66, + ), + api_timeout: TEST_API_TIMEOUT, + tx_capture_timeout: TEST_MAXIMIZE_TX_CAPTURE_TIMEOUT, + txn_garbage_collect_duration: TEST_INCLUDED_TX_GC_PERIOD, + txn_channel_capacity: TEST_CHANNEL_BUFFER_SIZE, + base_fee: TEST_BASE_FEE, + 
tx_status_cache_capacity: TEST_TX_STATUS_CACHE_CAPACITY, + } + } +} + impl GlobalState where Types: NodeType, @@ -77,27 +118,19 @@ where >>::Error: Display, for<'a> >::Error: Display, { - pub fn new( - builder_keys: ( - Types::BuilderSignatureKey, - <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, - ), - api_timeout: Duration, - tx_capture_timeout: Duration, - txn_garbage_collect_duration: Duration, - txn_channel_capacity: usize, - base_fee: u64, - hooks: Hooks, - ) -> Arc { - let coordinator = - BuilderStateCoordinator::new(txn_channel_capacity, txn_garbage_collect_duration); + pub fn new(config: BuilderConfig, hooks: Hooks) -> Arc { + let coordinator = BuilderStateCoordinator::new( + config.txn_channel_capacity, + config.txn_garbage_collect_duration, + config.tx_status_cache_capacity, + ); Arc::new(Self { hooks: Arc::new(hooks), coordinator: Arc::new(coordinator), - builder_keys, - api_timeout, - tx_capture_timeout, - base_fee, + builder_keys: config.builder_keys, + api_timeout: config.api_timeout, + tx_capture_timeout: config.tx_capture_timeout, + base_fee: config.base_fee, }) } @@ -133,8 +166,8 @@ where event_stream: impl Stream> + Unpin + Send + 'static, ) -> JoinHandle> { spawn(Self::event_loop( - self.coordinator.clone(), - self.hooks.clone(), + Arc::clone(&self.coordinator), + Arc::clone(&self.hooks), event_stream, )) } @@ -158,28 +191,35 @@ where tracing::error!("Error event in HotShot: {:?}", error); } EventType::Transactions { transactions } => { - let transactions = hooks.process_transactions(transactions).await; - - let _ = transactions - .into_iter() - .map(|txn| { - coordinator.handle_transaction(ReceivedTransaction::new( - txn, - TransactionSource::Public, - )) - }) - .collect::>() - .collect::>() - .await; + let hooks = Arc::clone(&hooks); + let coordinator = Arc::clone(&coordinator); + spawn(async move { + let transactions = hooks.process_transactions(transactions).await; + + let _ = transactions + .into_iter() + .map(|txn| { + 
coordinator.handle_transaction(ReceivedTransaction::new( + txn, + TransactionSource::Public, + )) + }) + .collect::>() + .collect::>() + .await; + }); } EventType::Decide { leaf_chain, .. } => { - coordinator.handle_decide(leaf_chain).await; + let coordinator = Arc::clone(&coordinator); + spawn(async move { coordinator.handle_decide(leaf_chain).await }); } EventType::DaProposal { proposal, .. } => { - coordinator.handle_da_proposal(proposal.data).await; + let coordinator = Arc::clone(&coordinator); + spawn(async move { coordinator.handle_da_proposal(proposal.data).await }); } EventType::QuorumProposal { proposal, .. } => { - coordinator.handle_quorum_proposal(proposal.data).await; + let coordinator = Arc::clone(&coordinator); + spawn(async move { coordinator.handle_quorum_proposal(proposal.data).await }); } _ => {} } @@ -369,10 +409,8 @@ where .map(|txn| ReceivedTransaction::new(txn, TransactionSource::Private)) .map(|txn| async { let commit = txn.commit; - self.coordinator - .handle_transaction(txn) - .await - .map(|_| commit) + self.coordinator.handle_transaction(txn).await?; + Ok(commit) }) .collect::>() .try_collect() @@ -381,11 +419,9 @@ where async fn txn_status( &self, - _txn_hash: Commitment<::Transaction>, + txn_hash: Commitment<::Transaction>, ) -> Result { - Err(BuildError::Error( - "txn_status feature Not Implemented for marketplace builder yet.".to_string(), - )) + Ok(self.coordinator.tx_status(&txn_hash)) } } diff --git a/crates/marketplace/src/testing/basic_test.rs b/crates/marketplace/src/testing/basic_test.rs index 223d2d08..1d4cf42c 100644 --- a/crates/marketplace/src/testing/basic_test.rs +++ b/crates/marketplace/src/testing/basic_test.rs @@ -1,45 +1,26 @@ use async_broadcast::broadcast; -use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_builder_api::v0_99::data_source::{AcceptsTxnSubmits, BuilderDataSource}; use hotshot_example_types::block_types::TestTransaction; -use marketplace_builder_shared::testing::constants::{ - 
TEST_API_TIMEOUT, TEST_BASE_FEE, TEST_INCLUDED_TX_GC_PERIOD, TEST_MAXIMIZE_TX_CAPTURE_TIMEOUT, -}; -use tokio::time::sleep; -use tracing_subscriber::EnvFilter; +use tracing_test::traced_test; use crate::hooks::NoHooks; -use crate::service::{GlobalState, ProxyGlobalState}; -use crate::testing::SimulatedChainState; +use crate::service::{BuilderConfig, GlobalState, ProxyGlobalState}; +use marketplace_builder_shared::testing::consensus::SimulatedChainState; use std::marker::PhantomData; use std::sync::Arc; -use std::time::Duration; /// This test simulates multiple builder states receiving messages from the channels and processing them #[tokio::test] +#[traced_test] async fn test_builder() { - // Setup logging - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); - - tracing::info!("Testing the builder core with multiple messages from the channels"); - // Number of views to simulate const NUM_ROUNDS: usize = 5; // Number of transactions to submit per round const NUM_TXNS_PER_ROUND: usize = 4; - // Capacity of broadcast channels - const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; let global_state = Arc::new(GlobalState::new( - BLSPubKey::generated_from_seed_indexed([0; 32], 0), - TEST_API_TIMEOUT, - TEST_MAXIMIZE_TX_CAPTURE_TIMEOUT, - TEST_INCLUDED_TX_GC_PERIOD, - CHANNEL_CAPACITY, - TEST_BASE_FEE, + BuilderConfig::test(), NoHooks(PhantomData), )); let proxy_global_state = ProxyGlobalState(Arc::clone(&global_state)); @@ -79,9 +60,6 @@ async fn test_builder() { .simulate_consensus_round(prev_proposed_transactions) .await; - // give builder state time to fork - sleep(Duration::from_millis(100)).await; - // get response let bundle = proxy_global_state .bundle( diff --git a/crates/marketplace/src/testing/integration.rs b/crates/marketplace/src/testing/integration.rs index 4d3f8bac..71382b42 100644 --- a/crates/marketplace/src/testing/integration.rs +++ b/crates/marketplace/src/testing/integration.rs @@ -1,7 +1,7 @@ //! 
This module implements interfaces necessary to run marketplace builder //! in HotShot testing harness. -use std::{collections::HashMap, fmt::Display, marker::PhantomData, sync::Arc, time::Duration}; +use std::{collections::HashMap, fmt::Display, marker::PhantomData, sync::Arc}; use async_trait::async_trait; use hotshot::types::SignatureKey; @@ -9,10 +9,7 @@ use hotshot_testing::{ block_builder::{BuilderTask, TestBuilderImplementation}, test_builder::BuilderChange, }; -use hotshot_types::{ - data::ViewNumber, - traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey}, -}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::NodeType}; use tagged_base64::TaggedBase64; use tokio::spawn; use url::Url; @@ -20,11 +17,9 @@ use vbs::version::StaticVersion; use crate::{ hooks::{BuilderHooks, NoHooks}, - service::GlobalState, + service::{BuilderConfig, GlobalState}, }; -const BUILDER_CHANNEL_CAPACITY: usize = 1024; - /// Testing configuration for marketplace builder /// Stores hooks that will be used in the builder in a type-erased manner, /// allowing for runtime configuration of hooks to use in tests. 
@@ -72,18 +67,8 @@ where config: Self::Config, _changes: HashMap, ) -> Box> { - let builder_key_pair = Types::BuilderSignatureKey::generated_from_seed_indexed([0; 32], 0); - // Create the global state - let service = GlobalState::new( - builder_key_pair, - Duration::from_millis(500), - Duration::from_millis(10), - Duration::from_secs(60), - BUILDER_CHANNEL_CAPACITY, - 1, // Arbitrary base fee - config.hooks, - ); + let service = GlobalState::new(BuilderConfig::test(), config.hooks); // Create tide-disco app based on global state let app = Arc::clone(&service) @@ -152,7 +137,7 @@ mod tests { #[tokio::test(flavor = "multi_thread")] #[tracing::instrument] #[ignore = "slow"] - async fn example_test() { + async fn basic_test() { let num_successful_views = 45; let min_txns_per_view = 5; @@ -187,39 +172,6 @@ mod tests { .await } - #[tokio::test(flavor = "multi_thread")] - #[tracing::instrument] - #[ignore = "slow"] - async fn stress_test() { - run_test::( - TestDescription { - completion_task_description: - CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - overall_safety_properties: OverallSafetyPropertiesDescription { - num_successful_views: 50, - num_failed_views: 5, - ..Default::default() - }, - ..TestDescription::default() - }, - BuilderValidationConfig { - expected_txn_num: num_cpus::get() * 50, - }, - TransactionGenerationConfig { - strategy: generation::GenerationStrategy::Flood { - min_tx_size: 16, - max_tx_size: 128, - }, - endpoints: vec![], - }, - ) - .await - } - cross_tests!( TestName: example_cross_test, Impls: [MemoryImpl], diff --git a/crates/marketplace/src/testing/mod.rs b/crates/marketplace/src/testing/mod.rs index ad983627..15f8e1a8 100644 --- a/crates/marketplace/src/testing/mod.rs +++ b/crates/marketplace/src/testing/mod.rs @@ -1,174 +1,3 @@ -use std::marker::PhantomData; - -use async_broadcast::Sender; -use committable::Committable; -use hotshot::{ - 
rand::{seq::SliceRandom, thread_rng}, - traits::BlockPayload, - types::{BLSPubKey, Event, EventType, SignatureKey}, -}; -use hotshot_example_types::{ - block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, - node_types::{TestTypes, TestVersions}, - state_types::{TestInstanceState, TestValidatedState}, -}; -use hotshot_types::{ - data::{DaProposal, Leaf2, QuorumProposal2, ViewNumber}, - drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, - message::Proposal, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, - simple_vote::QuorumData2, - traits::{block_contents::vid_commitment, node_implementation::ConsensusTime}, -}; -use marketplace_builder_shared::block::BuilderStateId; -use marketplace_builder_shared::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; -use sha2::{Digest, Sha256}; - pub mod basic_test; pub mod integration; pub mod order_test; - -pub(crate) struct SimulatedChainState { - round: ViewNumber, - previous_quorum_proposal: Option>, - event_stream_sender: Sender>, -} - -impl SimulatedChainState { - pub fn new(event_stream_sender: Sender>) -> Self { - Self { - round: ViewNumber::genesis(), - previous_quorum_proposal: None, - event_stream_sender, - } - } - - pub async fn simulate_consensus_round( - &mut self, - transactions: Option>, - ) -> BuilderStateId { - let transactions = transactions.unwrap_or_default(); - let num_transactions = transactions.len() as u64; - let encoded_transactions = TestTransaction::encode(&transactions); - let block_payload = TestBlockPayload { transactions }; - let block_vid_commitment = - vid_commitment(&encoded_transactions, TEST_NUM_NODES_IN_VID_COMPUTATION); - let metadata = TestMetadata { num_transactions }; - let block_builder_commitment = - >::builder_commitment( - &block_payload, - &metadata, - ); - - // generate key for leader of this round - let seed = [self.round.u64() as u8; 32]; - let (pub_key, private_key) = BLSPubKey::generated_from_seed_indexed(seed, 
self.round.u64()); - - let quorum_signature = - ::SignatureKey::sign( - &private_key, - block_vid_commitment.as_ref(), - ) - .expect("Failed to sign payload commitment while preparing Quorum proposal"); - let da_signature = - ::SignatureKey::sign( - &private_key, - Sha256::digest(&encoded_transactions).as_ref(), - ) - .expect("Failed to sign payload commitment while preparing DA proposal"); - - let da_proposal = DaProposal { - encoded_transactions: encoded_transactions.into(), - metadata, - view_number: self.round, - }; - - let block_header = TestBlockHeader { - block_number: self.round.u64(), - payload_commitment: block_vid_commitment, - builder_commitment: block_builder_commitment, - timestamp: self.round.u64(), - metadata, - random: 1, // arbitrary - }; - - let justify_qc = match self.previous_quorum_proposal.as_ref() { - None => QuorumCertificate::::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .to_qc2(), - Some(prev_proposal) => { - let prev_justify_qc = &prev_proposal.justify_qc; - let quorum_data = QuorumData2:: { - leaf_commit: Committable::commit(&Leaf2::from_quorum_proposal(prev_proposal)), - }; - - // form a justify qc - SimpleCertificate::, SuccessThreshold>::new( - quorum_data.clone(), - quorum_data.commit(), - prev_proposal.view_number, - prev_justify_qc.signatures.clone(), - PhantomData, - ) - } - }; - - tracing::debug!("Iteration: {} justify_qc: {:?}", self.round, justify_qc); - - let quorum_proposal = QuorumProposal2:: { - block_header, - view_number: self.round, - justify_qc: justify_qc.clone(), - upgrade_certificate: None, - view_change_evidence: None, - drb_seed: INITIAL_DRB_SEED_INPUT, - drb_result: INITIAL_DRB_RESULT, - }; - - let quorum_proposal_event = EventType::QuorumProposal { - proposal: Proposal { - data: quorum_proposal.clone(), - signature: quorum_signature, - _pd: PhantomData, - }, - sender: pub_key, - }; - - let da_proposal_event = EventType::DaProposal { - proposal: Proposal { - data: 
da_proposal, - signature: da_signature, - _pd: PhantomData, - }, - sender: pub_key, - }; - - let builder_state_id = BuilderStateId { - parent_commitment: block_vid_commitment, - parent_view: self.round, - }; - - let mut events = vec![quorum_proposal_event, da_proposal_event]; - // Shuffle the events to shake out possible bugs that depend on event ordering - events.shuffle(&mut thread_rng()); - - for evt in events { - self.event_stream_sender - .broadcast(Event { - view_number: self.round, - event: evt, - }) - .await - .unwrap(); - } - - // Update own state - self.round = ViewNumber::new(self.round.u64() + 1); - self.previous_quorum_proposal = Some(quorum_proposal); - - builder_state_id - } -} diff --git a/crates/marketplace/src/testing/order_test.rs b/crates/marketplace/src/testing/order_test.rs index 000ef5b4..9de02971 100644 --- a/crates/marketplace/src/testing/order_test.rs +++ b/crates/marketplace/src/testing/order_test.rs @@ -1,29 +1,19 @@ use async_broadcast::broadcast; use hotshot_builder_api::v0_99::data_source::{AcceptsTxnSubmits, BuilderDataSource}; use hotshot_types::{bundle::Bundle, traits::node_implementation::ConsensusTime}; -use marketplace_builder_shared::{ - block::BuilderStateId, - testing::constants::{ - TEST_API_TIMEOUT, TEST_BASE_FEE, TEST_INCLUDED_TX_GC_PERIOD, - TEST_MAXIMIZE_TX_CAPTURE_TIMEOUT, - }, -}; -use tracing_subscriber::EnvFilter; +use marketplace_builder_shared::{block::BuilderStateId, testing::consensus::SimulatedChainState}; +use tracing_test::traced_test; use crate::{ hooks::NoHooks, - service::{GlobalState, ProxyGlobalState}, - testing::SimulatedChainState, + service::{BuilderConfig, GlobalState, ProxyGlobalState}, }; use std::{fmt::Debug, marker::PhantomData, sync::Arc}; use hotshot_example_types::block_types::TestTransaction; -use hotshot::{ - rand::{self, seq::SliceRandom, thread_rng}, - types::{BLSPubKey, SignatureKey}, -}; +use hotshot::rand::{self, seq::SliceRandom, thread_rng}; /// [`RoundTransactionBehavior`] is an enum 
that is used to represent different /// behaviors that we may want to simulate during a round. This applies to @@ -126,31 +116,15 @@ fn order_check( /// It's fine that leader doesn't include some of transactions we've given, or interspersed with other transactions, /// as long as the order is correct it will be good. #[tokio::test] +#[traced_test] async fn test_builder_order() { - // Setup logging - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); - - tracing::info!("Testing the builder core with multiple messages from the channels"); - /// Number of views to simulate, make sure it's larger than 5 /// so that we have enough rounds to play with const NUM_ROUNDS: usize = 10; /// Number of transactions to submit per round const NUM_TXNS_PER_ROUND: usize = 5; - /// Capacity of broadcast channels - const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; - - let global_state = GlobalState::new( - BLSPubKey::generated_from_seed_indexed([0; 32], 0), - TEST_API_TIMEOUT, - TEST_MAXIMIZE_TX_CAPTURE_TIMEOUT, - TEST_INCLUDED_TX_GC_PERIOD, - CHANNEL_CAPACITY, - TEST_BASE_FEE, - NoHooks(PhantomData), - ); + + let global_state = GlobalState::new(BuilderConfig::test(), NoHooks(PhantomData)); let proxy_global_state = ProxyGlobalState(Arc::clone(&global_state)); let (event_stream_sender, event_stream) = broadcast(1024); @@ -254,20 +228,12 @@ async fn test_builder_order() { /// with one chain proposing transactions we've given and the other not /// (we should give out the next batch if responding for first chain and both batches for the other) #[tokio::test] +#[traced_test] async fn test_builder_order_chain_fork() { - // Setup logging - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); - - tracing::info!("Testing the builder core with multiple messages from the channels"); - // Number of views to simulate const NUM_ROUNDS: usize = 4; // Number of transactions to submit per round const 
NUM_TXNS_PER_ROUND: usize = 5; - // Capacity of broadcast channels - const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; // the round we want to skip all the transactions for the fork chain // round 0 is pre-fork @@ -286,15 +252,7 @@ async fn test_builder_order_chain_fork() { RoundTransactionBehavior::NoAdjust }; - let global_state = GlobalState::new( - BLSPubKey::generated_from_seed_indexed([0; 32], 0), - TEST_API_TIMEOUT, - TEST_MAXIMIZE_TX_CAPTURE_TIMEOUT, - TEST_INCLUDED_TX_GC_PERIOD, - CHANNEL_CAPACITY, - TEST_BASE_FEE, - NoHooks(PhantomData), - ); + let global_state = GlobalState::new(BuilderConfig::test(), NoHooks(PhantomData)); let proxy_global_state = ProxyGlobalState(Arc::clone(&global_state)); let (event_stream_sender, event_stream) = broadcast(1024); @@ -409,30 +367,14 @@ async fn test_builder_order_chain_fork() { /// It should fail as the proposer randomly drop a subset of transactions within a bundle, /// which leads to different order of transaction. #[tokio::test] +#[traced_test] async fn test_builder_order_should_fail() { - // Setup logging - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); - - tracing::info!("Testing the builder core with multiple messages from the channels"); - // Number of views to simulate const NUM_ROUNDS: usize = 10; // Number of transactions to submit per round const NUM_TXNS_PER_ROUND: usize = 5; - // Capacity of broadcast channels - const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; - - let global_state = GlobalState::new( - BLSPubKey::generated_from_seed_indexed([0; 32], 0), - TEST_API_TIMEOUT, - TEST_MAXIMIZE_TX_CAPTURE_TIMEOUT, - TEST_INCLUDED_TX_GC_PERIOD, - CHANNEL_CAPACITY, - TEST_BASE_FEE, - NoHooks(PhantomData), - ); + + let global_state = GlobalState::new(BuilderConfig::test(), NoHooks(PhantomData)); let proxy_global_state = ProxyGlobalState(Arc::clone(&global_state)); let (event_stream_sender, event_stream) = broadcast(1024); diff --git a/crates/shared/Cargo.toml 
b/crates/shared/Cargo.toml index df04a163..b28a6c25 100644 --- a/crates/shared/Cargo.toml +++ b/crates/shared/Cargo.toml @@ -24,9 +24,13 @@ hotshot-testing = { workspace = true } hotshot-types = { workspace = true } jf-vid = { workspace = true } nonempty-collections = { workspace = true } +once_cell = { workspace = true } +quick_cache = { workspace = true } rand = { workspace = true } serde = { workspace = true } +sha2 = { workspace = true } surf-disco = { workspace = true } +thiserror = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } @@ -35,8 +39,9 @@ vbs = { workspace = true } vec1 = { workspace = true } [dev-dependencies] -portpicker = "0.1.1" +portpicker = { workspace = true } tide-disco = { workspace = true } +tracing-test = { workspace = true } [lints] workspace = true diff --git a/crates/shared/src/block.rs b/crates/shared/src/block.rs index e3b509b0..2554a371 100644 --- a/crates/shared/src/block.rs +++ b/crates/shared/src/block.rs @@ -95,10 +95,18 @@ impl std::fmt::Display for BuilderStateId { /// References to the parent block that is extended to spawn the new builder state. 
#[derive(derive_more::Debug, Clone, PartialEq, Eq)] pub struct ParentBlockReferences { + /// View on which the parent block was proposed pub view_number: Types::View, + /// VID commitment of the parent block payload pub vid_commitment: VidCommitment, + /// Leaf commitment of the parent leaf pub leaf_commit: Commitment>, + /// Builder commitment of the parent block payload pub builder_commitment: BuilderCommitment, + /// Number of transactions included in the parent block + pub tx_count: usize, + /// Last known view that had a block with transactions + pub last_nonempty_view: Option, } impl ParentBlockReferences @@ -111,7 +119,9 @@ where view_number: Types::View::genesis(), vid_commitment: VidCommitment::default(), leaf_commit: fake_commitment(), - builder_commitment: BuilderCommitment::from_bytes([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + tx_count: 0, + last_nonempty_view: None, } } } diff --git a/crates/shared/src/coordinator/mod.rs b/crates/shared/src/coordinator/mod.rs index 92c1d0a1..c7f0974a 100644 --- a/crates/shared/src/coordinator/mod.rs +++ b/crates/shared/src/coordinator/mod.rs @@ -7,9 +7,10 @@ use std::{ use async_broadcast::Sender; use async_lock::{Mutex, RwLock}; -use builder_state_map::BuilderStateMap; +use committable::Commitment; use either::Either; -use hotshot_builder_api::v0_99::builder::BuildError; +use hotshot::traits::BlockPayload; +use hotshot_builder_api::v0_1::builder::TransactionStatus; use hotshot_types::{ data::{DaProposal, QuorumProposal2}, event::LeafInfo, @@ -18,19 +19,24 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, }, }; +use quick_cache::sync::Cache; +use tiered_view_map::TieredViewMap; use tracing::{error, info, warn}; use crate::{ block::{BuilderStateId, ParentBlockReferences, ReceivedTransaction}, + error::Error, state::BuilderState, utils::ProposalId, }; -pub mod builder_state_map; +pub mod tiered_view_map; type ProposalMap = HashMap, Either, DaProposal>>; +type 
BuilderStateMap = TieredViewMap, Arc>>; + /// Result of looking up a builder state by ID. /// /// Different from an [`Option`] as it distinguishes between @@ -78,6 +84,7 @@ where Types: NodeType, { builder_states: RwLock>, + tx_status: quick_cache::sync::Cache, TransactionStatus>, transaction_sender: Sender>>, proposals: Mutex>, } @@ -91,7 +98,12 @@ where /// `txn_garbage_collect_duration` specifies the duration for which the coordinator retains the hashes of transactions /// that have been marked as included by its [`BuilderState`]s. Once this duration has elapsed, new [`BuilderState`]s /// can include duplicates of older transactions should such be received again. - pub fn new(txn_channel_capacity: usize, txn_garbage_collect_duration: Duration) -> Self { + /// `tx_status_cache_capacity` controls the capacity of transaction status + pub fn new( + txn_channel_capacity: usize, + txn_garbage_collect_duration: Duration, + tx_status_cache_capacity: usize, + ) -> Self { let (txn_sender, txn_receiver) = async_broadcast::broadcast(txn_channel_capacity); let bootstrap_state = BuilderState::new( ParentBlockReferences::bootstrap(), @@ -100,12 +112,13 @@ where Types::ValidatedState::default(), ); let mut builder_states = BuilderStateMap::new(); - builder_states.insert(bootstrap_state); + builder_states.insert(bootstrap_state.id(), bootstrap_state); Self { transaction_sender: txn_sender, builder_states: RwLock::new(builder_states), proposals: Mutex::new(ProposalMap::new()), + tx_status: Cache::new(tx_status_cache_capacity), } } @@ -118,15 +131,45 @@ where leaf_chain: Arc>>, ) -> BuilderStateMap { let latest_decide_view_num = leaf_chain[0].leaf.view_number(); - let mut builder_states = self.builder_states.write().await; - let highest_active_view_num = builder_states - .highest_view() - .unwrap_or(Types::View::genesis()); - let cutoff = Types::View::new(*latest_decide_view_num.min(highest_active_view_num)); - builder_states.prune(cutoff) + + for leaf_info in leaf_chain.iter() { 
+ if let Some(payload) = leaf_info.leaf.block_payload() { + for commitment in + payload.transaction_commitments(leaf_info.leaf.block_header().metadata()) + { + self.update_txn_status( + &commitment, + TransactionStatus::Sequenced { + leaf: leaf_info.leaf.block_header().block_number(), + }, + ); + } + } + } + + let pruned = { + let mut builder_states_write_guard = self.builder_states.write().await; + let highest_active_view_num = builder_states_write_guard + .highest_view() + .unwrap_or(Types::View::genesis()); + let cutoff = Types::View::new(*latest_decide_view_num.min(highest_active_view_num)); + tracing::info!( + lowest_view = ?builder_states_write_guard.lowest_view(), + ?cutoff, + highest_view = ?builder_states_write_guard.highest_view(), + "Pruning builder state map" + ); + builder_states_write_guard.prune(cutoff) + }; + tracing::info!(num_states_pruned = pruned.len(), "Pruned builder state map"); + pruned } - /// This function should be called whenever new transactions are received from HotShot. + /// Enqueue new transaction in all builder states managed by this coordinator. + /// + /// Builder states will automatically filter transactions already included from + /// their point of view when dequeing transactions. + /// ///
/// /// Important: [`BuilderState`]s do not automatically remove transactions from the channel. @@ -138,21 +181,33 @@ where pub async fn handle_transaction( &self, transaction: ReceivedTransaction, - ) -> Result<(), BuildError> { - match self.transaction_sender.try_broadcast(Arc::new(transaction)) { - Ok(None) => Ok(()), - Ok(Some(evicted_txn)) => { - warn!( - ?evicted_txn.commit, - "Overflow mode enabled, transaction evicted", - ); - Ok(()) - } + ) -> Result<(), Error> { + let commit = transaction.commit; + + let maybe_evicted = match self.transaction_sender.try_broadcast(Arc::new(transaction)) { + Ok(maybe_evicted) => maybe_evicted, Err(err) => { warn!(?err, "Failed to broadcast txn"); - Err(BuildError::Error(err.to_string())) + self.update_txn_status( + &commit, + TransactionStatus::Rejected { + reason: "Failed to broadcast transaction".to_owned(), + }, + ); + return Err(Error::TxnSender(err)); } + }; + + self.update_txn_status(&commit, TransactionStatus::Pending); + + if let Some(evicted) = maybe_evicted { + warn!( + ?evicted.commit, + "Overflow mode enabled, transaction evicted", + ); } + + Ok(()) } /// This function should be called whenever new DA Proposal is recieved from HotShot. @@ -283,7 +338,10 @@ where .new_child(quorum_proposal.clone(), da_proposal.clone()) .await; - self.builder_states.write().await.insert(child_state); + self.builder_states + .write() + .await + .insert(child_state.id(), child_state); } /// This is an utility function that is used to determine which [`BuilderState`]s @@ -425,18 +483,55 @@ where warn!("View time-travel"); Vec::new() } + + /// Update status of transaction. + pub fn update_txn_status( + &self, + txn_hash: &Commitment<::Transaction>, + new_status: TransactionStatus, + ) { + if let Some(old_status) = self.tx_status.get(txn_hash) { + match old_status { + TransactionStatus::Rejected { .. } | TransactionStatus::Sequenced { .. 
} => { + tracing::debug!( + ?old_status, + ?new_status, + "Not changing status of rejected/sequenced transaction", + ); + return; + } + _ => { + tracing::debug!(?old_status, ?new_status, "Changing status of transaction",); + } + } + } + self.tx_status.insert(*txn_hash, new_status); + } + + /// Get transaction status for given hash + pub fn tx_status(&self, txn_hash: &Commitment) -> TransactionStatus { + self.tx_status + .get(txn_hash) + .unwrap_or(TransactionStatus::Unknown) + } } +#[cfg_attr(coverage_nightly, coverage(off))] #[cfg(test)] mod tests { use std::time::Instant; + use committable::Committable; use hotshot_example_types::node_types::TestTypes; + use hotshot_types::data::ViewNumber; + use tracing_test::traced_test; use crate::{ block::TransactionSource, testing::{ - constants::{TEST_CHANNEL_BUFFER_SIZE, TEST_INCLUDED_TX_GC_PERIOD}, + constants::{ + TEST_CHANNEL_BUFFER_SIZE, TEST_INCLUDED_TX_GC_PERIOD, TEST_TX_STATUS_CACHE_CAPACITY, + }, mock, }, }; @@ -446,9 +541,13 @@ mod tests { type BuilderStateCoordinator = super::BuilderStateCoordinator; #[tokio::test] + #[traced_test] async fn test_coordinator_new() { - let coordinator = - BuilderStateCoordinator::new(TEST_CHANNEL_BUFFER_SIZE, TEST_INCLUDED_TX_GC_PERIOD); + let coordinator = BuilderStateCoordinator::new( + TEST_CHANNEL_BUFFER_SIZE, + TEST_INCLUDED_TX_GC_PERIOD, + TEST_TX_STATUS_CACHE_CAPACITY, + ); assert_eq!( coordinator.builder_states.read().await.len(), @@ -494,9 +593,13 @@ mod tests { } #[tokio::test] + #[traced_test] async fn test_handle_proposal_matching_types_creates_builder_state() { - let coordinator = - BuilderStateCoordinator::new(TEST_CHANNEL_BUFFER_SIZE, TEST_INCLUDED_TX_GC_PERIOD); + let coordinator = BuilderStateCoordinator::new( + TEST_CHANNEL_BUFFER_SIZE, + TEST_INCLUDED_TX_GC_PERIOD, + TEST_TX_STATUS_CACHE_CAPACITY, + ); let (da_proposal, quorum_proposal) = mock::proposals(7).await; @@ -516,9 +619,13 @@ mod tests { } #[tokio::test] + #[traced_test] async fn 
test_handle_proposal_duplicate_proposal_ignored() { - let coordinator = - BuilderStateCoordinator::new(TEST_CHANNEL_BUFFER_SIZE, TEST_INCLUDED_TX_GC_PERIOD); + let coordinator = BuilderStateCoordinator::new( + TEST_CHANNEL_BUFFER_SIZE, + TEST_INCLUDED_TX_GC_PERIOD, + TEST_TX_STATUS_CACHE_CAPACITY, + ); let (proposal, _) = mock::proposals(7).await; @@ -536,9 +643,13 @@ mod tests { } #[tokio::test] + #[traced_test] async fn test_handle_proposal_stores_new_proposal_when_no_match() { - let coordinator = - BuilderStateCoordinator::new(TEST_CHANNEL_BUFFER_SIZE, TEST_INCLUDED_TX_GC_PERIOD); + let coordinator = BuilderStateCoordinator::new( + TEST_CHANNEL_BUFFER_SIZE, + TEST_INCLUDED_TX_GC_PERIOD, + TEST_TX_STATUS_CACHE_CAPACITY, + ); let (proposal, _) = mock::proposals(1).await; let proposal_id = ProposalId::from_da_proposal(&proposal); @@ -560,9 +671,13 @@ mod tests { } #[tokio::test] + #[traced_test] async fn test_handle_proposal_same_view_different_proposals() { - let coordinator = - BuilderStateCoordinator::new(TEST_CHANNEL_BUFFER_SIZE, TEST_INCLUDED_TX_GC_PERIOD); + let coordinator = BuilderStateCoordinator::new( + TEST_CHANNEL_BUFFER_SIZE, + TEST_INCLUDED_TX_GC_PERIOD, + TEST_TX_STATUS_CACHE_CAPACITY, + ); let view_number = 9; // arbitrary let (da_proposal_1, quorum_proposal_1) = mock::proposals(view_number).await; @@ -594,9 +709,13 @@ mod tests { } #[tokio::test] + #[traced_test] async fn test_decide_reaps_old_proposals() { - let coordinator = - BuilderStateCoordinator::new(TEST_CHANNEL_BUFFER_SIZE, TEST_INCLUDED_TX_GC_PERIOD); + let coordinator = BuilderStateCoordinator::new( + TEST_CHANNEL_BUFFER_SIZE, + TEST_INCLUDED_TX_GC_PERIOD, + TEST_TX_STATUS_CACHE_CAPACITY, + ); for view in 0..100 { let (da_proposal, quorum_proposal) = mock::proposals(view).await; @@ -635,12 +754,95 @@ mod tests { } #[tokio::test] + #[traced_test] + async fn test_transaction_status() { + // Lower for this test not to spend too much time here + const CHANNEL_BUFFER_SIZE: usize = 32; + + let 
coordinator = BuilderStateCoordinator::new( + CHANNEL_BUFFER_SIZE, + TEST_INCLUDED_TX_GC_PERIOD, + TEST_TX_STATUS_CACHE_CAPACITY, + ); + + let enqueued_transactions = (0..CHANNEL_BUFFER_SIZE) + .map(|_| mock::transaction()) + .collect::>(); + + // Coordinator should update transaction status when included + for tx in enqueued_transactions.iter() { + assert_eq!( + coordinator.tx_status(&tx.commit()), + TransactionStatus::Unknown + ); + coordinator + .handle_transaction(ReceivedTransaction::new( + tx.clone(), + TransactionSource::Public, + )) + .await + .unwrap(); + assert_eq!( + coordinator.tx_status(&tx.commit()), + TransactionStatus::Pending + ); + } + + // This transaction won't be included, we're over capacity + let rejected_transaction = mock::transaction(); + coordinator + .handle_transaction(ReceivedTransaction::new( + rejected_transaction.clone(), + TransactionSource::Public, + )) + .await + .unwrap_err(); + assert!(matches!( + coordinator.tx_status(&rejected_transaction.commit()), + TransactionStatus::Rejected { .. } + )); + + // Transaction that was never submitted to the builder but is going to be + // included anyway, simulating it being included by a different builder + let external_transaction = mock::transaction(); + + let decided_transactions = enqueued_transactions + .iter() + .chain(std::iter::once(&external_transaction)) + .cloned() + .collect::>(); + + // Simulate all transactions being decided + let leaf_chain = mock::decide_leaf_chain_with_transactions( + *ViewNumber::genesis(), + decided_transactions.clone(), + ) + .await; + coordinator.handle_decide(leaf_chain).await; + + // All decided transactions should change status + for tx in decided_transactions { + assert!(matches!( + coordinator.tx_status(&tx.commit()), + TransactionStatus::Sequenced { .. 
} + )); + } + } + + #[tokio::test] + #[traced_test] async fn test_transaction_overflow() { - let coordinator = - BuilderStateCoordinator::new(TEST_CHANNEL_BUFFER_SIZE, TEST_INCLUDED_TX_GC_PERIOD); + // Lower for this test not to spend too much time here + const CHANNEL_BUFFER_SIZE: usize = 32; + + let coordinator = BuilderStateCoordinator::new( + CHANNEL_BUFFER_SIZE, + TEST_INCLUDED_TX_GC_PERIOD, + TEST_TX_STATUS_CACHE_CAPACITY, + ); // Coordinator should handle transactions while there's space in the buffer - for _ in 0..TEST_CHANNEL_BUFFER_SIZE { + for _ in 0..CHANNEL_BUFFER_SIZE { coordinator .handle_transaction(ReceivedTransaction::new( mock::transaction(), @@ -666,11 +868,11 @@ mod tests { .await .highest_view_builder() .unwrap() - .collect_txns(Instant::now() + Duration::from_millis(100)) + .collect_txns(Instant::now() + Duration::from_secs(10)) // huge duration, we want to clear the whole buffer .await; // After clearing the channel, coordinator should handle transactions again - for _ in 0..TEST_CHANNEL_BUFFER_SIZE { + for _ in 0..CHANNEL_BUFFER_SIZE { coordinator .handle_transaction(ReceivedTransaction::new( mock::transaction(), diff --git a/crates/shared/src/coordinator/builder_state_map.rs b/crates/shared/src/coordinator/tiered_view_map.rs similarity index 60% rename from crates/shared/src/coordinator/builder_state_map.rs rename to crates/shared/src/coordinator/tiered_view_map.rs index a2468788..e357eeee 100644 --- a/crates/shared/src/coordinator/builder_state_map.rs +++ b/crates/shared/src/coordinator/tiered_view_map.rs @@ -1,20 +1,25 @@ -//! This module contains an optimizide implementation of a -//! [`BuilderStateId`] to [`BuilderState`] map. +//! This module contains an optimized implementation of a +//! composite key two-tier map, where the first key is a view +//! 
number use std::{ collections::{btree_map::Entry, BTreeMap}, + hash::Hash, ops::RangeBounds, - sync::Arc, }; -use hotshot_types::{traits::node_implementation::NodeType, vid::VidCommitment}; +use hotshot_types::{ + traits::node_implementation::{ConsensusTime, NodeType}, + utils::BuilderCommitment, + vid::VidCommitment, +}; use nonempty_collections::{nem, NEMap}; -use crate::{block::BuilderStateId, state::BuilderState}; +use crate::block::{BlockId, BuilderStateId}; -/// A map from [`BuilderStateId`] to [`BuilderState`], implemented as a tiered map -/// with the first tier being [`BTreeMap`] keyed by view number of [`BuilderStateId`] -/// and the second [`NEMap`] keyed by VID commitment of [`BuilderStateId`]. +/// A map from [`ViewCompositeKey`] to arbitrary value, implemented as a tiered map +/// with the first tier being [`BTreeMap`] keyed by view number of [`ViewCompositeKey`] +/// and the second [`NEMap`] keyed by subkey of [`ViewCompositeKey`]. /// /// Usage of [`BTreeMap`] means that the map has an convenient property of always being /// sorted by view number, which makes common operations such as pruning old views more efficient. @@ -22,48 +27,90 @@ use crate::{block::BuilderStateId, state::BuilderState}; /// Second tier being non-empty by construction [`NEMap`] ensures that we can't accidentally /// create phantom entries with empty maps in the first tier. 
#[derive(Debug)] -pub struct BuilderStateMap( - BTreeMap<::View, NEMap>>>, -); +pub struct TieredViewMap(BTreeMap>) +where + K: ViewCompositeKey; + +/// A two-component key, of which one component is [`ConsensusTime`] +/// +/// See [`TieredViewMap`] documentation for more information +pub trait ViewCompositeKey { + type Subkey: Hash + Eq; + type View: ConsensusTime; + fn view(&self) -> &Self::View; + fn subkey(&self) -> &Self::Subkey; + fn into_subkey(self) -> Self::Subkey; +} + +impl ViewCompositeKey for BlockId { + type Subkey = BuilderCommitment; + type View = Types::View; + + fn view(&self) -> &::View { + &self.view + } + + fn subkey(&self) -> &Self::Subkey { + &self.hash + } + + fn into_subkey(self) -> Self::Subkey { + self.hash + } +} + +impl ViewCompositeKey for BuilderStateId { + type Subkey = VidCommitment; + type View = Types::View; + + fn view(&self) -> &::View { + &self.parent_view + } + + fn subkey(&self) -> &Self::Subkey { + &self.parent_commitment + } -impl BuilderStateMap { + fn into_subkey(self) -> Self::Subkey { + self.parent_commitment + } +} + +impl TieredViewMap +where + K: ViewCompositeKey, +{ /// Create a new empty map pub fn new() -> Self { Self(BTreeMap::new()) } /// Returns an iterator visiting all values in this map - pub fn values(&self) -> impl Iterator>> { + pub fn values(&self) -> impl Iterator { self.0 .values() .flat_map(|bucket| bucket.values().into_iter()) } - /// Returns a nested iterator visiting all [`BuilderState`]s for view numbers in given range - pub fn range( - &self, - range: R, - ) -> impl Iterator>>> + /// Returns a nested iterator visiting all values for view numbers in given range + pub fn range(&self, range: R) -> impl Iterator> where - R: RangeBounds, + R: RangeBounds, { self.0 .range(range) .map(|(_, bucket)| bucket.values().into_iter()) } - /// Returns an iterator visiting all [`BuilderState`]s for given view number - pub fn bucket( - &self, - view_number: &Types::View, - ) -> impl Iterator>> { + /// Returns an 
iterator visiting all values for given view number + pub fn bucket(&self, view_number: &K::View) -> impl Iterator { self.0 .get(view_number) .into_iter() .flat_map(|bucket| bucket.values().into_iter()) } - /// Returns the number of builder states stored + /// Returns the number of entries in this map pub fn len(&self) -> usize { self.0.values().map(|bucket| bucket.len().get()).sum() } @@ -73,70 +120,82 @@ impl BuilderStateMap { self.0.len() == 0 } - /// Get builder state by ID - pub fn get(&self, key: &BuilderStateId) -> Option<&Arc>> { - self.0.get(&key.parent_view)?.get(&key.parent_commitment) + /// Get reference to a value by key + pub fn get(&self, key: &K) -> Option<&V> { + self.0.get(key.view())?.get(key.subkey()) + } + + /// Get mutable reference to a value by key + pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { + self.0.get_mut(key.view())?.get_mut(key.subkey()) } - /// Get highest view builder state + /// Get highest view value (no guarantees as to which one exactly + /// if there's multiple stored) /// /// Returns `None` if the map is empty - pub fn highest_view_builder(&self) -> Option<&Arc>> { + pub fn highest_view_builder(&self) -> Option<&V> { Some(&self.0.last_key_value()?.1.head_val) } - /// Insert a new builder state - pub fn insert(&mut self, value: Arc>) { - let key = value.id(); - match self.0.entry(key.parent_view) { + /// Insert a new value + pub fn insert(&mut self, key: K, value: V) { + match self.0.entry(*key.view()) { Entry::Vacant(entry) => { - entry.insert(nem![key.parent_commitment => value]); + entry.insert(nem![key.into_subkey() => value]); } Entry::Occupied(mut entry) => { - entry.get_mut().insert(key.parent_commitment, value); + entry.get_mut().insert(key.into_subkey(), value); } } } - /// Returns highest view number for which we have a builder state - pub fn highest_view(&self) -> Option { + /// Returns highest view number for which we have a value + pub fn highest_view(&self) -> Option { Some(*self.0.last_key_value()?.0) } 
- /// Returns lowest view number for which we have a builder state - pub fn lowest_view(&self) -> Option { + /// Returns lowest view number for which we have a value + pub fn lowest_view(&self) -> Option { Some(*self.0.first_key_value()?.0) } /// Removes every view lower than the `cutoff_view` (exclusive) from self and returns all removed views. - pub fn prune(&mut self, cutoff_view: Types::View) -> Self { + pub fn prune(&mut self, cutoff_view: K::View) -> Self { let high = self.0.split_off(&cutoff_view); let low = std::mem::replace(&mut self.0, high); Self(low) } } -impl Default for BuilderStateMap { +impl Default for TieredViewMap +where + K: ViewCompositeKey, +{ fn default() -> Self { Self::new() } } +#[cfg_attr(coverage_nightly, coverage(off))] #[cfg(test)] mod tests { - use std::{cmp::Ordering, ops::Bound}; + use std::{cmp::Ordering, ops::Bound, sync::Arc}; - use crate::testing::mock; + use crate::{state::BuilderState, testing::mock}; use super::*; use hotshot_example_types::node_types::TestTypes; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use rand::{distributions::Standard, thread_rng, Rng}; + use tracing_test::traced_test; type View = ViewNumber; - type BuilderStateMap = super::BuilderStateMap; + type BuilderStateMap = + super::TieredViewMap, Arc>>; #[test] + #[traced_test] fn test_new_map() { let new_map = BuilderStateMap::new(); assert!(new_map.is_empty()); @@ -148,13 +207,14 @@ mod tests { } #[test] + #[traced_test] fn test_insert_and_get() { let mut map = BuilderStateMap::new(); let builder_state = mock::builder_state(1); let state_id = builder_state.id(); - map.insert(builder_state.clone()); + map.insert(state_id.clone(), Arc::clone(&builder_state)); assert!(!map.is_empty()); assert_eq!(map.len(), 1); @@ -162,11 +222,13 @@ mod tests { } #[test] + #[traced_test] fn test_range_iteration() { let mut map = BuilderStateMap::new(); for i in 0..5 { - map.insert(mock::builder_state(i)); + let builder_state = 
mock::builder_state(i); + map.insert(builder_state.id(), builder_state); } let start = View::new(1); @@ -183,6 +245,7 @@ mod tests { } #[test] + #[traced_test] fn test_pruning() { let view_count = 11; let states_per_view = 13; @@ -192,7 +255,8 @@ mod tests { for view in 0..view_count { for _ in 0..states_per_view { - map.insert(mock::builder_state(view)); + let state = mock::builder_state(view); + map.insert(state.id(), state); } } @@ -208,13 +272,15 @@ mod tests { } #[test] + #[traced_test] fn test_highest_and_lowest_view() { let mut map = BuilderStateMap::new(); assert_eq!(map.highest_view(), None); assert_eq!(map.lowest_view(), None); for i in 3..13 { - map.insert(mock::builder_state(i)); + let state = mock::builder_state(i); + map.insert(state.id(), state); } assert_eq!(*map.highest_view().unwrap(), 12); @@ -222,6 +288,7 @@ mod tests { } #[test] + #[traced_test] fn test_highest_view_builder() { let mut map = BuilderStateMap::new(); assert!(map.highest_view_builder().is_none()); @@ -233,7 +300,7 @@ mod tests { .collect(); for state in states.iter() { - map.insert(state.clone()); + map.insert(state.id(), Arc::clone(state)); } states.sort_by(|a, b| { @@ -252,6 +319,7 @@ mod tests { } #[test] + #[traced_test] fn test_iterator() { let mut map = BuilderStateMap::new(); @@ -263,7 +331,7 @@ mod tests { assert_eq!(states.len(), 100); for state in states.iter() { - map.insert(state.clone()); + map.insert(state.id(), Arc::clone(state)); } states.sort_by(compare_builders); diff --git a/crates/shared/src/error.rs b/crates/shared/src/error.rs new file mode 100644 index 00000000..ac2d3b15 --- /dev/null +++ b/crates/shared/src/error.rs @@ -0,0 +1,50 @@ +use std::sync::Arc; + +use async_broadcast::TrySendError; +use hotshot::traits::BlockPayload; +use hotshot_builder_api::v0_99::builder::BuildError; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::BuilderSignatureKey}; +use thiserror::Error; + +use crate::block::ReceivedTransaction; + +#[derive(Error, 
Debug)] +pub enum Error { + #[error("Signature validation failed")] + SignatureValidation, + #[error(transparent)] + Signing(::SignError), + #[error("API response timed out")] + ApiTimeout, + #[error("Resource not found")] + NotFound, + #[error("Request for an already decided view")] + AlreadyDecided, + #[error(transparent)] + BuildBlock(>::Error), + #[error(transparent)] + TxnSender(TrySendError>>), + #[error("Transaction too big ({len}/{max_tx_len})")] + TxTooBig { len: u64, max_tx_len: u64 }, +} + +impl From> for BuildError { + fn from(value: Error) -> Self { + match value { + Error::SignatureValidation => { + BuildError::Error("Signature validation failed".to_owned()) + } + Error::Signing(_) => BuildError::Error("Failed to sign response".to_owned()), + Error::ApiTimeout => BuildError::Error("Timeout".to_owned()), + Error::NotFound => BuildError::NotFound, + Error::AlreadyDecided => { + BuildError::Error("Request for an already decided view".to_owned()) + } + Error::BuildBlock(_) => BuildError::Error("Failed to build block".to_owned()), + Error::TxnSender(_) => BuildError::Error("Transaction channel error".to_owned()), + Error::TxTooBig { len, max_tx_len } => { + BuildError::Error(format!("Transaction too big ({len}/{max_tx_len}")) + } + } + } +} diff --git a/crates/shared/src/lib.rs b/crates/shared/src/lib.rs index 8b5fc39e..1fde5dc1 100644 --- a/crates/shared/src/lib.rs +++ b/crates/shared/src/lib.rs @@ -2,6 +2,7 @@ pub mod block; pub mod coordinator; +pub mod error; pub mod state; #[cfg_attr(coverage_nightly, coverage(off))] pub mod testing; diff --git a/crates/shared/src/state.rs b/crates/shared/src/state.rs index 971f6a1f..0f3fcf64 100644 --- a/crates/shared/src/state.rs +++ b/crates/shared/src/state.rs @@ -159,6 +159,13 @@ where >::from_bytes(encoded_txns, metadata); let txn_commitments = block_payload.transaction_commitments(metadata); + let last_nonempty_view = if txn_commitments.is_empty() { + self.parent_block_references.last_nonempty_view + } else { + 
// This block is non-empty + Some(quorum_proposal.view_number) + }; + // We replace our parent_block_references with information from the // quorum proposal. This is identifying the block that this specific // instance of [BuilderState] is attempting to build for. @@ -167,6 +174,8 @@ where vid_commitment: quorum_proposal.block_header.payload_commitment(), leaf_commit: Committable::commit(&leaf), builder_commitment: quorum_proposal.block_header.builder_commitment(), + tx_count: txn_commitments.len(), + last_nonempty_view, }; let mut txn_queue = self.txn_queue.read().await.clone(); diff --git a/crates/shared/src/testing/consensus.rs b/crates/shared/src/testing/consensus.rs new file mode 100644 index 00000000..20f7b6b8 --- /dev/null +++ b/crates/shared/src/testing/consensus.rs @@ -0,0 +1,173 @@ +//! This module defines types used when simulating consensus in tests + +use std::marker::PhantomData; + +use crate::block::BuilderStateId; +use crate::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; +use async_broadcast::Sender; +use committable::Committable; +use hotshot::{ + rand::{seq::SliceRandom, thread_rng}, + traits::BlockPayload, + types::{BLSPubKey, Event, EventType, SignatureKey}, +}; +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, + node_types::{TestTypes, TestVersions}, + state_types::{TestInstanceState, TestValidatedState}, +}; +use hotshot_types::{ + data::{DaProposal, Leaf2, QuorumProposal2, ViewNumber}, + drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, + message::Proposal, + simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_vote::QuorumData2, + traits::{block_contents::vid_commitment, node_implementation::ConsensusTime}, +}; +use sha2::{Digest, Sha256}; + +pub struct SimulatedChainState { + round: ViewNumber, + previous_quorum_proposal: Option>, + event_stream_sender: Sender>, +} + +impl SimulatedChainState { + pub fn new(event_stream_sender: 
Sender>) -> Self { + Self { + round: ViewNumber::genesis(), + previous_quorum_proposal: None, + event_stream_sender, + } + } + + pub async fn simulate_consensus_round( + &mut self, + transactions: Option>, + ) -> BuilderStateId { + let transactions = transactions.unwrap_or_default(); + let num_transactions = transactions.len() as u64; + let encoded_transactions = TestTransaction::encode(&transactions); + let block_payload = TestBlockPayload { transactions }; + let block_vid_commitment = + vid_commitment(&encoded_transactions, TEST_NUM_NODES_IN_VID_COMPUTATION); + let metadata = TestMetadata { num_transactions }; + let block_builder_commitment = + >::builder_commitment( + &block_payload, + &metadata, + ); + + // generate key for leader of this round + let seed = [self.round.u64() as u8; 32]; + let (pub_key, private_key) = BLSPubKey::generated_from_seed_indexed(seed, self.round.u64()); + + let quorum_signature = + ::SignatureKey::sign( + &private_key, + block_vid_commitment.as_ref(), + ) + .expect("Failed to sign payload commitment while preparing Quorum proposal"); + let da_signature = + ::SignatureKey::sign( + &private_key, + Sha256::digest(&encoded_transactions).as_ref(), + ) + .expect("Failed to sign payload commitment while preparing DA proposal"); + + let da_proposal = DaProposal { + encoded_transactions: encoded_transactions.into(), + metadata, + view_number: self.round, + }; + + let block_header = TestBlockHeader { + block_number: self.round.u64(), + payload_commitment: block_vid_commitment, + builder_commitment: block_builder_commitment, + timestamp: self.round.u64(), + metadata, + random: 1, // arbitrary + }; + + let justify_qc = match self.previous_quorum_proposal.as_ref() { + None => QuorumCertificate::::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + .to_qc2(), + Some(prev_proposal) => { + let prev_justify_qc = &prev_proposal.justify_qc; + let quorum_data = QuorumData2:: { + leaf_commit: 
Committable::commit(&Leaf2::from_quorum_proposal(prev_proposal)), + }; + + // form a justify qc + SimpleCertificate::, SuccessThreshold>::new( + quorum_data.clone(), + quorum_data.commit(), + prev_proposal.view_number, + prev_justify_qc.signatures.clone(), + PhantomData, + ) + } + }; + + tracing::debug!("Iteration: {} justify_qc: {:?}", self.round, justify_qc); + + let quorum_proposal = QuorumProposal2:: { + block_header, + view_number: self.round, + justify_qc: justify_qc.clone(), + upgrade_certificate: None, + view_change_evidence: None, + drb_seed: INITIAL_DRB_SEED_INPUT, + + drb_result: INITIAL_DRB_RESULT, + }; + + let quorum_proposal_event = EventType::QuorumProposal { + proposal: Proposal { + data: quorum_proposal.clone(), + signature: quorum_signature, + _pd: PhantomData, + }, + sender: pub_key, + }; + + let da_proposal_event = EventType::DaProposal { + proposal: Proposal { + data: da_proposal, + signature: da_signature, + _pd: PhantomData, + }, + sender: pub_key, + }; + + let builder_state_id = BuilderStateId { + parent_commitment: block_vid_commitment, + parent_view: self.round, + }; + + let mut events = vec![quorum_proposal_event, da_proposal_event]; + // Shuffle the events to shake out possible bugs that depend on event ordering + events.shuffle(&mut thread_rng()); + + for evt in events { + self.event_stream_sender + .broadcast(Event { + view_number: self.round, + event: evt, + }) + .await + .unwrap(); + } + + // Update own state + self.round = ViewNumber::new(self.round.u64() + 1); + self.previous_quorum_proposal = Some(quorum_proposal); + + builder_state_id + } +} diff --git a/crates/shared/src/testing/constants.rs b/crates/shared/src/testing/constants.rs index ab89789c..15128a3f 100644 --- a/crates/shared/src/testing/constants.rs +++ b/crates/shared/src/testing/constants.rs @@ -21,7 +21,14 @@ pub const TEST_NUM_CONSENSUS_RETRIES: usize = 4; /// Governs the buffer size used for the test channels. /// All of the channels created need a capacity. 
The concrete capacity isn't /// specifically bounded in tests, so it is set to an arbitrary value. -pub const TEST_CHANNEL_BUFFER_SIZE: usize = 32; +pub const TEST_CHANNEL_BUFFER_SIZE: usize = 81920; + +/// Governs the target space used by the mapping from txn to its status. +/// This is expressed as a target number of transactions. +/// This is an arbitrary default value for testing. +pub const TEST_TX_STATUS_CACHE_CAPACITY: usize = 10_000_000; + +pub const TEST_MAX_TX_NUM: usize = TEST_TX_STATUS_CACHE_CAPACITY; /// Governs the included transaction GC period used in tests. /// This is an arbitrary default value for testing. @@ -38,8 +45,3 @@ pub const TEST_MAXIMIZE_TX_CAPTURE_TIMEOUT: Duration = Duration::from_millis(100 /// Governs fee per byte used by builders. /// This is an arbitrary default value for testing. pub const TEST_BASE_FEE: u64 = 1; - -/// Governs the target space used by the mapping from txn to its status. -/// This is expressed as a target number of transactions. -/// This is an arbitrary default value for testing. -pub const TEST_MAX_TX_NUM: usize = 1_000_000 * 10; diff --git a/crates/shared/src/testing/mock.rs b/crates/shared/src/testing/mock.rs index 7c60602e..1d9e4945 100644 --- a/crates/shared/src/testing/mock.rs +++ b/crates/shared/src/testing/mock.rs @@ -1,3 +1,4 @@ +//! 
A collection of generator functions for mock data used in tests use std::{marker::PhantomData, sync::Arc, time::Duration}; use async_broadcast::broadcast; @@ -44,8 +45,21 @@ pub fn transaction() -> TestTransaction { } pub async fn decide_leaf_chain(decided_view: u64) -> Arc>> { - let (_, quorum_proposal) = proposals(decided_view).await; - let leaf = Leaf2::from_quorum_proposal(&quorum_proposal); + decide_leaf_chain_with_transactions(decided_view, vec![transaction()]).await +} + +pub async fn decide_leaf_chain_with_transactions( + decided_view: u64, + transactions: Vec, +) -> Arc>> { + let (da_proposal, quorum_proposal) = + proposals_with_transactions(decided_view, transactions).await; + let mut leaf = Leaf2::from_quorum_proposal(&quorum_proposal); + let payload = >::from_bytes( + &da_proposal.encoded_transactions, + &da_proposal.metadata, + ); + leaf.fill_block_payload_unchecked(payload); Arc::new(vec![LeafInfo { leaf, state: Default::default(), @@ -56,20 +70,28 @@ pub async fn decide_leaf_chain(decided_view: u64) -> Arc /// Create mock pair of DA and Quorum proposals pub async fn proposals(view: u64) -> (DaProposal, QuorumProposal2) { + let transaction = transaction(); + proposals_with_transactions(view, vec![transaction]).await +} + +/// Create mock pair of DA and Quorum proposals with given transactions +pub async fn proposals_with_transactions( + view: u64, + transactions: Vec, +) -> (DaProposal, QuorumProposal2) { let view_number = ::View::new(view); let upgrade_lock = UpgradeLock::::new(); let validated_state = TestValidatedState::default(); let instance_state = TestInstanceState::default(); - let transaction = transaction(); let (payload, metadata) = >::from_transactions( - vec![transaction.clone()], + transactions.clone(), &validated_state, &instance_state, ) .await .unwrap(); - let encoded_transactions = TestTransaction::encode(&[transaction]); + let encoded_transactions = TestTransaction::encode(&transactions); let header = TestBlockHeader::new( 
&Leaf::::genesis(&Default::default(), &Default::default()) @@ -155,5 +177,7 @@ pub fn parent_references(view: u64) -> ParentBlockReferences { builder_commitment: BuilderCommitment::from_bytes( rng.sample_iter(Standard).take(32).collect::>(), ), + tx_count: rng.gen(), + last_nonempty_view: None, } } diff --git a/crates/shared/src/testing/mod.rs b/crates/shared/src/testing/mod.rs index 6e46c3b6..ef62cf94 100644 --- a/crates/shared/src/testing/mod.rs +++ b/crates/shared/src/testing/mod.rs @@ -8,9 +8,9 @@ use hotshot_testing::{block_builder::TestBuilderImplementation, test_builder::Te use hotshot_types::traits::node_implementation::Versions; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; -use tracing_subscriber::EnvFilter; use validation::BuilderValidationConfig; +pub mod consensus; pub mod constants; pub mod generation; pub mod mock; @@ -59,11 +59,6 @@ pub async fn run_test TestResult { - if self.txn_history.len() <= self.config.expected_txn_num { + if self.txn_history.len() + self.alien_transactions.len() <= self.config.expected_txn_num { return TestResult::Fail(Box::new(format!( "Expected at least {} transactions, got {}", self.config.expected_txn_num, diff --git a/crates/shared/src/utils.rs b/crates/shared/src/utils/event_serivce_wrapper.rs similarity index 76% rename from crates/shared/src/utils.rs rename to crates/shared/src/utils/event_serivce_wrapper.rs index 6a79f162..8a97cef0 100644 --- a/crates/shared/src/utils.rs +++ b/crates/shared/src/utils/event_serivce_wrapper.rs @@ -1,23 +1,14 @@ use std::{future::Future, pin::Pin}; -use std::{ - collections::HashSet, - hash::Hash, - mem, - time::{Duration, Instant}, -}; +use std::time::Duration; use anyhow::Context; use either::Either::{self, Left, Right}; use futures::stream::unfold; use futures::{Stream, StreamExt}; -use hotshot::traits::BlockPayload; use hotshot::types::Event; use hotshot_events_service::events::Error as EventStreamError; -use hotshot_types::data::{DaProposal, QuorumProposal2}; 
-use hotshot_types::traits::block_contents::BlockHeader; use hotshot_types::traits::node_implementation::NodeType; -use hotshot_types::utils::BuilderCommitment; use surf_disco::client::HealthStatus; use surf_disco::Client; use tokio::time::{sleep, timeout}; @@ -25,75 +16,6 @@ use tracing::{error, warn}; use url::Url; use vbs::version::StaticVersionType; -/// A set that allows for time-based garbage collection, -/// implemented as three sets that are periodically shifted right. -/// Garbage collection is triggered by calling [`Self::rotate`]. -#[derive(Clone, Debug)] -pub struct RotatingSet -where - T: PartialEq + Eq + Hash + Clone, -{ - fresh: HashSet, - stale: HashSet, - expiring: HashSet, - pub last_rotation: Instant, - pub period: Duration, -} - -impl RotatingSet -where - T: PartialEq + Eq + Hash + Clone, -{ - /// Construct a new `RotatingSet` - pub fn new(period: Duration) -> Self { - Self { - fresh: HashSet::new(), - stale: HashSet::new(), - expiring: HashSet::new(), - last_rotation: Instant::now(), - period, - } - } - - /// Returns `true` if the key is contained in the set - pub fn contains(&self, key: &T) -> bool { - self.fresh.contains(key) || self.stale.contains(key) || self.expiring.contains(key) - } - - /// Insert a `key` into the set. Doesn't trigger garbage collection - pub fn insert(&mut self, value: T) { - self.fresh.insert(value); - } - - /// Force garbage collection, even if the time elapsed since - /// the last garbage collection is less than `self.period` - pub fn force_rotate(&mut self) { - let now_stale = mem::take(&mut self.fresh); - let now_expiring = mem::replace(&mut self.stale, now_stale); - self.expiring = now_expiring; - self.last_rotation = Instant::now(); - } - - /// Trigger garbage collection. 
- pub fn rotate(&mut self) -> bool { - if self.last_rotation.elapsed() > self.period { - self.force_rotate(); - true - } else { - false - } - } -} - -impl Extend for RotatingSet -where - T: PartialEq + Eq + Hash + Clone, -{ - fn extend>(&mut self, iter: I) { - self.fresh.extend(iter) - } -} - type EventServiceConnection = surf_disco::socket::Connection< Event, surf_disco::socket::Unsupported, @@ -208,6 +130,7 @@ impl EventServiceStream -where - Types: NodeType, -{ - view_number: Types::View, - builder_commitment: BuilderCommitment, -} - -impl ProposalId -where - Types: NodeType, -{ - pub fn from_quorum_proposal(proposal: &QuorumProposal2) -> Self { - Self { - builder_commitment: proposal.block_header.builder_commitment(), - view_number: proposal.view_number, - } - } - - pub fn from_da_proposal(proposal: &DaProposal) -> Self { - let builder_commitment = >::from_bytes( - &proposal.encoded_transactions, - &proposal.metadata, - ) - .builder_commitment(&proposal.metadata); - - Self { - builder_commitment, - view_number: proposal.view_number, - } - } - - pub fn view_number(&self) -> ::View { - self.view_number - } - - pub fn builder_commitment(&self) -> &BuilderCommitment { - &self.builder_commitment - } -} diff --git a/crates/shared/src/utils/mod.rs b/crates/shared/src/utils/mod.rs new file mode 100644 index 00000000..e826a4ab --- /dev/null +++ b/crates/shared/src/utils/mod.rs @@ -0,0 +1,115 @@ +use std::{future::Future, hash::Hash, sync::Arc}; + +use futures::{ + future::{RemoteHandle, Shared}, + FutureExt, +}; +use hotshot::traits::BlockPayload; +use hotshot_types::{ + data::{DaProposal, QuorumProposal2}, + traits::{ + block_contents::BlockHeader, node_implementation::NodeType, + signature_key::BuilderSignatureKey, + }, + utils::BuilderCommitment, +}; +use tokio::{spawn, sync::Notify}; + +pub mod rotating_set; +pub use rotating_set::RotatingSet; + +pub mod event_serivce_wrapper; +pub use event_serivce_wrapper::EventServiceStream; + +/// A convenience type alias for a 
tuple of builder keys +/// `(public_key, private_key)` +pub type BuilderKeys = ( + ::BuilderSignatureKey, + <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, +); + +#[derive(Debug, Clone, Hash, PartialEq, Eq)] +pub struct ProposalId +where + Types: NodeType, +{ + view_number: Types::View, + builder_commitment: BuilderCommitment, +} + +impl ProposalId +where + Types: NodeType, +{ + pub fn from_quorum_proposal(proposal: &QuorumProposal2) -> Self { + Self { + builder_commitment: proposal.block_header.builder_commitment(), + view_number: proposal.view_number, + } + } + + pub fn from_da_proposal(proposal: &DaProposal) -> Self { + let builder_commitment = >::from_bytes( + &proposal.encoded_transactions, + &proposal.metadata, + ) + .builder_commitment(&proposal.metadata); + + Self { + builder_commitment, + view_number: proposal.view_number, + } + } + + pub fn view_number(&self) -> ::View { + self.view_number + } + + pub fn builder_commitment(&self) -> &BuilderCommitment { + &self.builder_commitment + } +} + +#[derive(derive_more::Debug, Clone)] +pub struct WaitAndKeep +where + T: Clone + Sync + Send + 'static, +{ + handle: Shared>, + notifier: Arc, +} + +impl WaitAndKeep +where + T: Clone + Sync + Send + 'static, +{ + /// Creates a new [`WaitAndKeep`] wrapping the provided future. 
+ /// Future will be essentially paused until [`WaitAndKeep::start`] is called + pub fn new + Send + 'static>(fut: F) -> Self { + let notifier = Arc::new(Notify::new()); + let (fut, handle) = { + let notifier = Arc::clone(¬ifier); + async move { + let _ = notifier.notified().await; + fut.await + } + .remote_handle() + }; + spawn(fut); + Self { + notifier, + handle: handle.shared(), + } + } + + /// Signals the underlying future to start running + pub fn start(&self) { + self.notifier.notify_one(); + } + + /// Will consume self and return result of underlying future + pub async fn resolve(self) -> T { + self.start(); + self.handle.await + } +} diff --git a/crates/shared/src/utils/rotating_set.rs b/crates/shared/src/utils/rotating_set.rs new file mode 100644 index 00000000..ab980cc3 --- /dev/null +++ b/crates/shared/src/utils/rotating_set.rs @@ -0,0 +1,136 @@ +use std::{ + collections::HashSet, + hash::Hash, + mem, + time::{Duration, Instant}, +}; + +/// A set that allows for time-based garbage collection, +/// implemented as three sets that are periodically shifted right. +/// Garbage collection is triggered by calling [`Self::rotate`]. +#[derive(Clone, Debug)] +pub struct RotatingSet +where + T: PartialEq + Eq + Hash + Clone, +{ + fresh: HashSet, + stale: HashSet, + expiring: HashSet, + pub last_rotation: Instant, + pub period: Duration, +} + +impl RotatingSet +where + T: PartialEq + Eq + Hash + Clone, +{ + /// Construct a new `RotatingSet` + pub fn new(period: Duration) -> Self { + Self { + fresh: HashSet::new(), + stale: HashSet::new(), + expiring: HashSet::new(), + last_rotation: Instant::now(), + period, + } + } + + /// Returns `true` if the key is contained in the set + pub fn contains(&self, key: &T) -> bool { + self.fresh.contains(key) || self.stale.contains(key) || self.expiring.contains(key) + } + + /// Insert a `key` into the set. 
Doesn't trigger garbage collection + pub fn insert(&mut self, value: T) { + self.fresh.insert(value); + } + + /// Force garbage collection, even if the time elapsed since + /// the last garbage collection is less than `self.period` + pub fn force_rotate(&mut self) { + let now_stale = mem::take(&mut self.fresh); + let now_expiring = mem::replace(&mut self.stale, now_stale); + self.expiring = now_expiring; + self.last_rotation = Instant::now(); + } + + /// Trigger garbage collection. + pub fn rotate(&mut self) -> bool { + if self.last_rotation.elapsed() > self.period { + self.force_rotate(); + true + } else { + false + } + } +} + +impl Extend for RotatingSet +where + T: PartialEq + Eq + Hash + Clone, +{ + fn extend>(&mut self, iter: I) { + self.fresh.extend(iter) + } +} + +#[cfg_attr(coverage_nightly, coverage(off))] +#[cfg(test)] +mod tests { + use tracing_test::traced_test; + + use super::*; + use std::thread::sleep; + + #[test] + #[traced_test] + fn test_insert_and_contains() { + let mut set = RotatingSet::new(Duration::from_secs(1)); + set.insert(1); + assert!(set.contains(&1)); + assert!(!set.contains(&2)); + } + + #[test] + #[traced_test] + fn test_rotate_and_contains() { + let mut set = RotatingSet::new(Duration::from_micros(10)); + set.insert(1); + + // Immediately after insertion, item should be in the set + assert!(set.contains(&1)); + + // Wait for longer than the rotation period + sleep(Duration::from_micros(15)); + + // Rotate and check if the item is still in the set + assert!(set.rotate()); + assert!(set.contains(&1)); + + // Shouldn't rotate this time + assert!(!set.rotate()); + + // Wait and rotate again to check if it moves to `expiring` + sleep(Duration::from_micros(15)); + assert!(set.rotate()); + assert!(set.contains(&1)); + + // Wait and rotate again to check if it gets removed + sleep(Duration::from_micros(15)); + assert!(set.rotate()); + assert!(!set.contains(&1)); + } + + #[test] + #[traced_test] + fn test_force_rotate() { + let mut set = 
RotatingSet::new(Duration::MAX); + set.insert(1); + set.force_rotate(); + assert!(set.contains(&1)); + set.force_rotate(); + assert!(set.contains(&1)); + set.force_rotate(); + assert!(!set.contains(&1)); + } +} diff --git a/flake.nix b/flake.nix index 6422ea66..d8ae6885 100644 --- a/flake.nix +++ b/flake.nix @@ -33,14 +33,14 @@ let overlays = [ (import rust-overlay) ]; pkgs = import nixpkgs { inherit system overlays; }; - rustToolchain = pkgs.rust-bin.stable.latest.minimal.override { - extensions = [ - "rustfmt" - "clippy" - "llvm-tools-preview" - "rust-src" - ]; - }; + extensions = [ + "rustfmt" + "clippy" + "llvm-tools-preview" + "rust-src" + ]; + rustToolchain = pkgs.rust-bin.stable.latest.minimal.override { inherit extensions; }; + rustToolchainNightly = pkgs.rust-bin.nightly."2024-09-01".default.override { inherit extensions; }; cargo-llvm-cov = if pkgs.stdenv.isDarwin then (pkgs.cargo-llvm-cov.overrideAttrs (_: { @@ -68,6 +68,7 @@ cargo-udeps cargo-sort cargo-nextest + cargo-mutants cmake ] @@ -155,7 +156,7 @@ perfShell = pkgs.mkShell { inherit shellHook; buildInputs = [ - rustToolchain + rustToolchainNightly cargo-llvm-cov ] ++ rustDeps;