diff --git a/.github/ISSUE_TEMPLATE/EXTERNAL_ISSUE_FORM.yml b/.github/ISSUE_TEMPLATE/EXTERNAL_ISSUE_FORM.yml index c37983e8d3..bf2657b95a 100644 --- a/.github/ISSUE_TEMPLATE/EXTERNAL_ISSUE_FORM.yml +++ b/.github/ISSUE_TEMPLATE/EXTERNAL_ISSUE_FORM.yml @@ -24,7 +24,7 @@ body: placeholder: | Include other details here such as: open questions, directions to reproduce a bug, relevant error logs, etc. - E.g. To reproduce this bug run `just async_std test_basic`. You should see logs similar to the ones below: + E.g. To reproduce this bug run `just test_basic`. You should see logs similar to the ones below: `ERROR: This is an important error!` validations: required: false diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4fbeb81866..221dd5f345 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -15,6 +15,10 @@ updates: - "*" exclude-patterns: - "cdn-*" + - "ark-*" cdn: patterns: - "cdn-*" + ark: + patterns: + - "ark-*" diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 0aacdac8ea..d8836d871d 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -18,9 +18,6 @@ jobs: test: strategy: matrix: - just_variants: - - async-std - - tokio test_suites: - test-ci-1 - test-ci-2 @@ -41,31 +38,22 @@ jobs: name: Enable Rust Caching with: shared-key: "test" - prefix-key: ${{ matrix.just_variants }} cache-on-failure: "true" save-if: ${{ github.ref == 'refs/heads/main' && matrix.test_suites == 'test-ci-rest' }} - - name: Install Just - run: | - wget https://github.com/casey/just/releases/download/1.14.0/just-1.14.0-x86_64-unknown-linux-musl.tar.gz - tar -vxf just-1.14.0-x86_64-unknown-linux-musl.tar.gz just - sudo cp just /usr/bin/just + - uses: taiki-e/install-action@just - uses: taiki-e/install-action@nextest - name: Unit and integration tests for all crates in workspace run: | - just ${{ matrix.just_variants }} ${{ matrix.test_suites }} + just ${{ matrix.test_suites }} 
timeout-minutes: 60 env: RUST_BACKTRACE: full test-examples: strategy: - matrix: - just_variants: - - async-std - - tokio fail-fast: false runs-on: ubuntu-latest steps: @@ -79,27 +67,18 @@ jobs: name: Enable Rust Caching with: shared-key: "examples" - prefix-key: ${{ matrix.just_variants }} cache-on-failure: "true" save-if: ${{ github.ref == 'refs/heads/main' }} - - name: Install Just - run: | - wget https://github.com/casey/just/releases/download/1.14.0/just-1.14.0-x86_64-unknown-linux-musl.tar.gz - tar -vxf just-1.14.0-x86_64-unknown-linux-musl.tar.gz just - sudo cp just /usr/bin/just + - uses: taiki-e/install-action@just - name: Test examples run: | - just ${{ matrix.just_variants }} example all-push-cdn -- --config_file ./crates/orchestrator/run-config.toml + just example all-push-cdn -- --config_file ./crates/orchestrator/run-config.toml timeout-minutes: 20 build-release: strategy: - matrix: - just_variants: - - async-std - - tokio fail-fast: false runs-on: ubuntu-latest steps: @@ -113,39 +92,30 @@ jobs: name: Enable Rust Caching with: shared-key: "build-release" - prefix-key: ${{ matrix.just_variants }} cache-on-failure: "true" save-if: ${{ github.ref == 'refs/heads/main' }} - - name: Install Just - run: | - wget https://github.com/casey/just/releases/download/1.14.0/just-1.14.0-x86_64-unknown-linux-musl.tar.gz - tar -vxf just-1.14.0-x86_64-unknown-linux-musl.tar.gz just - sudo cp just /usr/bin/just + - uses: taiki-e/install-action@just - name: Build examples in release mode - run: just ${{ matrix.just_variants }} build_release --examples --package hotshot-examples --no-default-features + run: just build_release --examples --package hotshot-examples --no-default-features - name: Upload Binaries uses: actions/upload-artifact@v4 with: - name: binaries-amd64-${{ matrix.just_variants }} + name: binaries-amd64 path: | - target/${{ matrix.just_variants }}/release/examples/counter - target/${{ matrix.just_variants }}/release/examples/multi-validator-libp2p - 
target/${{ matrix.just_variants }}/release/examples/validator-libp2p - target/${{ matrix.just_variants }}/release/examples/validator-combined - target/${{ matrix.just_variants }}/release/examples/validator-push-cdn - target/${{ matrix.just_variants }}/release/examples/orchestrator - target/${{ matrix.just_variants }}/release/examples/cdn-broker - target/${{ matrix.just_variants }}/release/examples/cdn-marshal + target/release/examples/counter + target/release/examples/multi-validator-libp2p + target/release/examples/validator-libp2p + target/release/examples/validator-combined + target/release/examples/validator-push-cdn + target/release/examples/orchestrator + target/release/examples/cdn-broker + target/release/examples/cdn-marshal build-arm-release: strategy: - matrix: - just_variants: - - async-std - - tokio fail-fast: false runs-on: buildjet-4vcpu-ubuntu-2204-arm if: ${{ github.ref == 'refs/heads/main' }} @@ -158,33 +128,28 @@ jobs: name: Enable Rust Caching with: shared-key: "build-arm-release" - prefix-key: ${{ matrix.just_variants }} cache-on-failure: "true" save-if: ${{ github.ref == 'refs/heads/main' }} - name: Build examples in release mode - run: just ${{ matrix.just_variants }} build_release --examples --package hotshot-examples --no-default-features + run: just build_release --examples --package hotshot-examples --no-default-features - name: Upload Binaries uses: actions/upload-artifact@v4 with: - name: binaries-aarch64-${{ matrix.just_variants }} + name: binaries-aarch64 path: | - target/${{ matrix.just_variants }}/release/examples/counter - target/${{ matrix.just_variants }}/release/examples/multi-validator-libp2p - target/${{ matrix.just_variants }}/release/examples/validator-libp2p - target/${{ matrix.just_variants }}/release/examples/validator-combined - target/${{ matrix.just_variants }}/release/examples/validator-push-cdn - target/${{ matrix.just_variants }}/release/examples/orchestrator - target/${{ matrix.just_variants 
}}/release/examples/cdn-broker - target/${{ matrix.just_variants }}/release/examples/cdn-marshal + target/release/examples/counter + target/release/examples/multi-validator-libp2p + target/release/examples/validator-libp2p + target/release/examples/validator-combined + target/release/examples/validator-push-cdn + target/release/examples/orchestrator + target/release/examples/cdn-broker + target/release/examples/cdn-marshal build-dockers: strategy: - matrix: - just_variants: - - async-std - - tokio fail-fast: false runs-on: ubuntu-latest if: ${{ github.ref == 'refs/heads/main' }} @@ -207,56 +172,50 @@ jobs: - name: Download AMD executables uses: actions/download-artifact@v4 with: - name: binaries-amd64-${{ matrix.just_variants }} - path: target/${{ matrix.just_variants }}/amd64/release/examples + name: binaries-amd64 + path: target/amd64/release/examples - name: Download ARM executables uses: actions/download-artifact@v4 with: - name: binaries-aarch64-${{ matrix.just_variants }} - path: target/${{ matrix.just_variants }}/arm64/release/examples + name: binaries-aarch64 + path: target/arm64/release/examples - name: Generate validator-libp2p docker metadata uses: docker/metadata-action@v5 id: validator-libp2p with: images: ghcr.io/espressosystems/hotshot/validator-libp2p - flavor: suffix=-${{ matrix.just_variants }} - name: Generate validator-combined docker metadata uses: docker/metadata-action@v5 id: validator-combined with: images: ghcr.io/espressosystems/hotshot/validator-combined - flavor: suffix=-${{ matrix.just_variants }} - name: Generate validator-push-cdn docker metadata uses: docker/metadata-action@v5 id: validator-push-cdn with: images: ghcr.io/espressosystems/hotshot/validator-push-cdn - flavor: suffix=-${{ matrix.just_variants }} - name: Generate orchestrator docker metadata uses: docker/metadata-action@v5 id: orchestrator with: images: ghcr.io/espressosystems/hotshot/orchestrator - flavor: suffix=-${{ matrix.just_variants }} - name: Generate cdn-broker 
docker metadata uses: docker/metadata-action@v5 id: cdn-broker with: images: ghcr.io/espressosystems/hotshot/cdn-broker - flavor: suffix=-${{ matrix.just_variants }} - name: Generate cdn-marshal docker metadata uses: docker/metadata-action@v5 id: cdn-marshal with: images: ghcr.io/espressosystems/hotshot/cdn-marshal - flavor: suffix=-${{ matrix.just_variants }} - name: Build and push validator-libp2p docker uses: docker/build-push-action@v6 @@ -269,8 +228,6 @@ jobs: labels: ${{ steps.validator-libp2p.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max - build-args: | - ASYNC_EXECUTOR=${{ matrix.just_variants }} - name: Build and push validator-combined docker uses: docker/build-push-action@v6 @@ -283,8 +240,6 @@ jobs: labels: ${{ steps.validator-combined.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max - build-args: | - ASYNC_EXECUTOR=${{ matrix.just_variants }} - name: Build and push validator-push-cdn docker uses: docker/build-push-action@v6 @@ -297,8 +252,6 @@ jobs: labels: ${{ steps.validator-push-cdn.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max - build-args: | - ASYNC_EXECUTOR=${{ matrix.just_variants }} - name: Build and push orchestrator docker uses: docker/build-push-action@v6 @@ -311,8 +264,6 @@ jobs: labels: ${{ steps.orchestrator.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max - build-args: | - ASYNC_EXECUTOR=${{ matrix.just_variants }} - name: Build and push cdn-broker docker uses: docker/build-push-action@v6 @@ -325,8 +276,6 @@ jobs: labels: ${{ steps.cdn-broker.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max - build-args: | - ASYNC_EXECUTOR=${{ matrix.just_variants }} - name: Build and push cdn-marshal docker uses: docker/build-push-action@v6 @@ -339,5 +288,3 @@ jobs: labels: ${{ steps.cdn-marshal.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max - build-args: | - ASYNC_EXECUTOR=${{ matrix.just_variants }} diff --git 
a/.github/workflows/build-without-lockfile.yml b/.github/workflows/build-without-lockfile.yml new file mode 100644 index 0000000000..632e882608 --- /dev/null +++ b/.github/workflows/build-without-lockfile.yml @@ -0,0 +1,37 @@ +name: Build without committed Cargo.lock + +on: + push: + branches: + - main + - release-* + tags: + # YYYYMMDD + - "20[0-9][0-9][0-1][0-9][0-3][0-9]*" + schedule: + - cron: "0 0 * * 1" + pull_request: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-ignore-lockfile: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Enable Rust Caching + uses: Swatinem/rust-cache@v2 + with: + prefix-key: v1-rust + + - name: Build without committed Cargo.lock + run: | + cargo generate-lockfile + cargo check --all-targets diff --git a/.github/workflows/build_nix.yml b/.github/workflows/build_nix.yml index 4546cee028..f34421b8fe 100644 --- a/.github/workflows/build_nix.yml +++ b/.github/workflows/build_nix.yml @@ -45,4 +45,4 @@ jobs: # sanity check that repository builds with nix - name: Build run: | - nix develop -c just async-std build + nix develop -c just build diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index 5d4606c541..3bbef19ffa 100644 --- a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -21,11 +21,7 @@ jobs: - name: Checkout Repository uses: actions/checkout@v4 - - name: Install Just - run: | - wget https://github.com/casey/just/releases/download/1.14.0/just-1.14.0-x86_64-unknown-linux-musl.tar.gz - tar -vxf just-1.14.0-x86_64-unknown-linux-musl.tar.gz just - sudo cp just /usr/bin/just + - uses: taiki-e/install-action@just - uses: Swatinem/rust-cache@v2 name: Enable Rust Caching @@ -35,16 +31,16 @@ jobs: - name: Test Docs run: | - just async-std doc_test + just doc_test - name: Build Docs run: | - just async-std doc + just doc - name: Create 
documentation if: ${{ github.ref == 'refs/heads/main' }} run: | - cp -R target/async-std/doc public + cp -R target/doc public echo '' > public/index.html - name: Deploy diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 45de5cead7..23f6a6810f 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,10 +17,6 @@ concurrency: jobs: clippy: strategy: - matrix: - just_variants: - - async-std - - tokio fail-fast: false runs-on: ubuntu-latest steps: @@ -34,18 +30,13 @@ jobs: name: Enable Rust Caching with: shared-key: "lint" - prefix-key: ${{ matrix.just_variants }} save-if: ${{ github.ref == 'refs/heads/main' }} - - name: Install Just - run: | - wget https://github.com/casey/just/releases/download/1.14.0/just-1.14.0-x86_64-unknown-linux-musl.tar.gz - tar -vxf just-1.14.0-x86_64-unknown-linux-musl.tar.gz just - sudo cp just /usr/bin/just + - uses: taiki-e/install-action@just - name: Run clippy run: | - just ${{ matrix.just_variants }} clippy + just clippy fmt: runs-on: ubuntu-latest @@ -56,12 +47,8 @@ jobs: - name: Install Rust uses: mkroening/rust-toolchain-toml@main - - name: Install Just - run: | - wget https://github.com/casey/just/releases/download/1.14.0/just-1.14.0-x86_64-unknown-linux-musl.tar.gz - tar -vxf just-1.14.0-x86_64-unknown-linux-musl.tar.gz just - sudo cp just /usr/bin/just + - uses: taiki-e/install-action@just - name: Check rustfmt run: | - just ${{ matrix.just_variants }} fmt_check + just fmt_check diff --git a/.github/workflows/semver-check.yml b/.github/workflows/semver-check.yml index 5d5551db74..a8e00d4418 100644 --- a/.github/workflows/semver-check.yml +++ b/.github/workflows/semver-check.yml @@ -16,11 +16,7 @@ jobs: test-sequencer: runs-on: ubuntu-latest name: semver - strategy: - matrix: - async_runtimes: - - async-std - - tokio + steps: - uses: actions/checkout@v4 name: Checkout Repository @@ -33,11 +29,7 @@ jobs: path: baseline ref: ${{ inputs.baseline }} - - name: Install Just - run: | - wget 
https://github.com/casey/just/releases/download/1.14.0/just-1.14.0-x86_64-unknown-linux-musl.tar.gz - tar -vxf just-1.14.0-x86_64-unknown-linux-musl.tar.gz just - sudo cp just /usr/bin/just + - uses: taiki-e/install-action@just - name: Install Rust uses: mkroening/rust-toolchain-toml@main @@ -46,7 +38,6 @@ jobs: name: Enable Rust Caching with: shared-key: "build-and-test" - prefix-key: ${{ matrix.just_variants }} save-if: false - name: Install cargo-semver-checks and cargo-workspaces @@ -57,5 +48,5 @@ jobs: - name: Run cargo-semver-checks run: | cd current - just ${{matrix.async_runtimes}} semver --baseline-root ../baseline + just semver --baseline-root ../baseline diff --git a/.github/workflows/test-sequencer.yml b/.github/workflows/test-sequencer.yml index ace9831be5..2850764bd8 100644 --- a/.github/workflows/test-sequencer.yml +++ b/.github/workflows/test-sequencer.yml @@ -63,11 +63,9 @@ jobs: - name: Build sequencer tests working-directory: sequencer run: | - export RUSTFLAGS='--cfg async_executor_impl="async-std" --cfg async_channel_impl="async-std" --cfg hotshot_example' cargo test --release --workspace --all-features --no-run - name: Run sequencer tests working-directory: sequencer run: | - export RUSTFLAGS='--cfg async_executor_impl="async-std" --cfg async_channel_impl="async-std" --cfg hotshot_example' cargo test --release --workspace --all-features --verbose -- --test-threads 1 --nocapture diff --git a/.vscode/settings.json.example b/.vscode/settings.json.example deleted file mode 100644 index 891c599eff..0000000000 --- a/.vscode/settings.json.example +++ /dev/null @@ -1,6 +0,0 @@ -{ - "rust-analyzer.server.extraEnv": { - "RUSTFLAGS": "--cfg async_executor_impl=\"async-std\" --cfg async_channel_impl=\"async-std\" --cfg hotshot_example", - "CARGO_TARGET_DIR": "vsc/target" - }, -} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 8522f9456d..8bcdeb6baf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,7 +23,7 @@ Adds new type 
parameter, corresponding to the state type, to Message ### Features - StatefulHandler trait - Reexport traits from traits module -- State Machine + NodeImplementation +- State Machine + Node Implementation - state machine mvp megasquash - Replace tokio broadcast queue with unbounded equivalent diff --git a/Cargo.lock b/Cargo.lock index 0c69b1f5c4..358e89a2fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,18 +4,18 @@ version = 3 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aead" @@ -126,9 +126,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -141,49 +141,49 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.91" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] @@ -329,7 +329,7 @@ dependencies = [ "num-traits", "paste", "rayon", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "zeroize", ] @@ -469,84 +469,51 @@ dependencies = [ ] [[package]] -name = "arrayref" -version = "0.3.7" +name = "arraydeque" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +checksum = 
"7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" [[package]] -name = "arrayvec" -version = "0.7.4" +name = "arrayref" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] -name = "asn1-rs" -version = "0.5.2" +name = "arrayvec" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" -dependencies = [ - "asn1-rs-derive 0.4.0", - "asn1-rs-impl 0.1.0", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror", - "time 0.3.36", -] +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "asn1-rs" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" +checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" dependencies = [ - "asn1-rs-derive 0.5.0", - "asn1-rs-impl 0.2.0", + "asn1-rs-derive", + "asn1-rs-impl", "displaydoc", "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 1.0.68", "time 0.3.36", ] [[package]] name = "asn1-rs-derive" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure 0.12.6", -] - -[[package]] -name = "asn1-rs-derive" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.85", + "syn 2.0.87", "synstructure 0.13.1", ] -[[package]] -name = "asn1-rs-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "asn1-rs-impl" version = "0.2.0" @@ -555,7 +522,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -583,7 +550,7 @@ dependencies = [ "event-listener 5.3.1", "event-listener-strategy", "futures-core", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", ] [[package]] @@ -606,27 +573,7 @@ dependencies = [ "concurrent-queue", "event-listener-strategy", "futures-core", - "pin-project-lite 0.2.14", -] - -[[package]] -name = "async-compatibility-layer" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32dd1dfd4a05a197583e51036d9615f04a4d851089dc119ee965d440d0bcaa39" -dependencies = [ - "async-lock 3.4.0", - "async-std", - "async-trait", - "color-eyre", - "console-subscriber 0.2.0", - "flume", - "futures", - "tokio", - "tokio-stream", - "tracing", - "tracing-error", - "tracing-subscriber 0.3.18", + "pin-project-lite 0.2.15", ] [[package]] @@ -641,29 +588,17 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8828ec6e544c02b0d6691d21ed9f9218d0384a82542855073c2a3f58304aaf0" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.1.0", - "futures-lite 2.3.0", + "fastrand 2.1.1", + "futures-lite 2.4.0", "slab", ] -[[package]] -name = "async-fs" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "blocking", - "futures-lite 1.13.0", -] - [[package]] name = "async-global-executor" version = "2.4.1" @@ -672,10 +607,10 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io 2.3.3", + "async-io 2.3.4", "async-lock 3.4.0", "blocking", - "futures-lite 2.3.0", + "futures-lite 2.4.0", "once_cell", "tokio", ] @@ -719,21 +654,21 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ "async-lock 3.4.0", "cfg-if", "concurrent-queue", "futures-io", - "futures-lite 2.3.0", + "futures-lite 2.4.0", "parking", - "polling 3.7.1", - "rustix 0.38.34", + "polling 3.7.4", + "rustix 0.38.39", "slab", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -753,7 +688,7 @@ checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ "event-listener 5.3.1", "event-listener-strategy", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", ] [[package]] @@ -764,54 +699,45 @@ checksum = "9e9e7a929bd34c68a82d58a4de7f86fffdaf97fb2af850162a7bb19dd7269b33" dependencies = [ "async-std", "native-tls", - "thiserror", + "thiserror 1.0.68", "url", ] -[[package]] -name = "async-net" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0434b1ed18ce1cf5769b8ac540e33f01fa9471058b5e89da9e06f3c882a8c12f" -dependencies = [ - "async-io 1.13.0", - "blocking", - "futures-lite 1.13.0", -] - [[package]] name = "async-process" -version = "1.8.1" +version = "2.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6438ba0a08d81529c69b36700fa2f95837bfe3e776ab39cde9c14d9149da88" +checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ - "async-io 1.13.0", - "async-lock 2.8.0", + "async-channel 2.3.1", + "async-io 2.3.4", + "async-lock 3.4.0", "async-signal", + "async-task", "blocking", "cfg-if", - "event-listener 3.1.0", - "futures-lite 1.13.0", - "rustix 0.38.34", - "windows-sys 0.48.0", + "event-listener 5.3.1", + "futures-lite 2.4.0", + "rustix 0.38.39", + "tracing", ] [[package]] name = "async-signal" -version = "0.2.7" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "329972aa325176e89114919f2a80fdae4f4c040f66a370b1a1159c6c0f94e7aa" +checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" dependencies = [ - "async-io 2.3.3", + "async-io 2.3.4", "async-lock 3.4.0", "atomic-waker", "cfg-if", "futures-core", "futures-io", - "rustix 0.38.34", + "rustix 0.38.39", "signal-hook-registry", "slab", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -830,67 +756,52 @@ dependencies = [ [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", + "async-io 2.3.4", + "async-lock 3.4.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", + "futures-lite 2.4.0", "gloo-timers", "kv-log-macro", "log", "memchr", "once_cell", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "pin-utils", "slab", "wasm-bindgen-futures", ] -[[package]] -name = "async-std-resolver" -version = 
"0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3b454643291f9a4a3bbdb35fa62efa4ba7be5ea13fe243e3be4352182ff4b8" -dependencies = [ - "async-std", - "async-trait", - "futures-io", - "futures-util", - "hickory-resolver", - "pin-utils", - "socket2 0.5.7", -] - [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", ] [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -920,7 +831,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -935,7 +846,7 @@ dependencies = [ "futures-io", "futures-util", "log", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "tungstenite", ] @@ -949,7 +860,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", ] [[package]] @@ -962,7 +873,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", ] [[package]] @@ -993,9 +904,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = 
"ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "automod" @@ -1005,7 +916,7 @@ checksum = "edf3ee19dbc0a46d740f6f0926bde8c50f02bdbc7b536842da28f6ac56513a8b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1021,13 +932,13 @@ dependencies = [ "futures-util", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.31", "itoa", "matchit", "memchr", "mime", "percent-encoding", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "rustversion", "serde", "sync_wrapper 0.1.2", @@ -1055,17 +966,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -1115,16 +1026,14 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.69.4" +version = "0.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cexpr", "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", + "itertools 0.13.0", "log", "prettyplease", "proc-macro2", @@ -1132,8 +1041,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", - "which", + "syn 2.0.87", ] [[package]] @@ -1144,9 +1052,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" dependencies = [ "serde", ] @@ -1213,15 +1121,15 @@ dependencies = [ "async-channel 2.3.1", "async-task", "futures-io", - "futures-lite 2.3.0", + "futures-lite 2.4.0", "piper", ] [[package]] name = "blst" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62dc83a094a71d43eeadd254b1ec2d24cb6a0bb6cadce00df51f0db594711a32" +checksum = "4378725facc195f1a538864863f6de233b500a8862747e7f165078a419d5e874" dependencies = [ "cc", "glob", @@ -1274,9 +1182,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" dependencies = [ "serde", ] @@ -1301,18 +1209,18 @@ dependencies = [ [[package]] name = "cbor4ii" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b4c883b9cc4757b061600d39001d4d0232bece4a3174696cf8f58a14db107d" +checksum = "472931dd4dfcc785075b09be910147f9c6258883fc4591d0dac6116392b2daa6" dependencies = [ "serde", ] [[package]] name = "cc" -version = "1.1.13" +version = "1.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72db2f7947ecee9b03b510377e8bb9077afa27176fdbff55c51027e976fdcc48" +checksum = "baee610e9452a8f6f0a1b6194ec09ff9e2d85dea54432acdae41aa0761c95d70" dependencies = [ "shlex", ] @@ -1322,13 +1230,12 @@ name = "cdn-broker" version = "0.4.0" source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.3#f4c54c9055b9f958eacf7c5b1759a559910893cc" dependencies = [ - "async-std", "cdn-proto", "clap", - "console-subscriber 0.3.0", + 
"console-subscriber", "dashmap", "derivative", - "jf-signature", + "jf-signature 0.1.0", "lazy_static", "local-ip-address", "parking_lot", @@ -1346,10 +1253,9 @@ name = "cdn-client" version = "0.4.0" source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.3#f4c54c9055b9f958eacf7c5b1759a559910893cc" dependencies = [ - "async-std", "cdn-proto", "clap", - "jf-signature", + "jf-signature 0.1.0", "parking_lot", "rand 0.8.5", "tokio", @@ -1362,10 +1268,9 @@ name = "cdn-marshal" version = "0.4.0" source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.5.3#f4c54c9055b9f958eacf7c5b1759a559910893cc" dependencies = [ - "async-std", "cdn-proto", "clap", - "jf-signature", + "jf-signature 0.1.0", "tokio", "tracing", "tracing-subscriber 0.3.18", @@ -1382,22 +1287,22 @@ dependencies = [ "capnp", "capnpc", "derivative", - "jf-signature", + "jf-signature 0.1.0", "kanal", "lazy_static", "mnemonic", "num_enum", "pem", "prometheus", - "quinn 0.11.5", + "quinn", "rand 0.8.5", "rcgen 0.13.1", "redis", "rkyv", - "rustls 0.23.7", + "rustls 0.23.16", "rustls-pki-types", "sqlx", - "thiserror", + "thiserror 1.0.68", "tokio", "tokio-rustls", "tracing", @@ -1420,6 +1325,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.38" @@ -1481,59 +1392,32 @@ version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" dependencies = [ "cc", ] -[[package]] -name = "color-eyre" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55146f5e46f237f7423d74111267d4597b59b0dad0ffaf7303bce9945d843ad5" -dependencies = [ - "backtrace", - "color-spantrace", - "eyre", - "indenter", - "once_cell", - "owo-colors", - "tracing-error", -] - -[[package]] -name = "color-spantrace" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6be1b2a7e382e2b98b43b2adcca6bb0e465af0bdd38123873ae61eb17a72c2" -dependencies = [ - "once_cell", - "owo-colors", - "tracing-core", - "tracing-error", -] - [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "combine" @@ -1544,7 +1428,7 @@ dependencies = [ "bytes", "futures-core", "memchr", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "tokio", "tokio-util", ] @@ -1578,14 +1462,13 @@ dependencies = [ [[package]] name = "config" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" +checksum = "68578f196d2a33ff61b27fae256c3164f65e36382648e30666dde05b8cc9dfdf" dependencies = [ "async-trait", 
"convert_case 0.6.0", "json5", - "lazy_static", "nom", "pathdiff", "ron", @@ -1593,20 +1476,7 @@ dependencies = [ "serde", "serde_json", "toml", - "yaml-rust", -] - -[[package]] -name = "console-api" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd326812b3fd01da5bb1af7d340d0d555fd3d4b641e7f1dfcf5962a902952787" -dependencies = [ - "futures-core", - "prost", - "prost-types", - "tonic 0.10.2", - "tracing-core", + "yaml-rust2", ] [[package]] @@ -1618,32 +1488,8 @@ dependencies = [ "futures-core", "prost", "prost-types", - "tonic 0.11.0", - "tracing-core", -] - -[[package]] -name = "console-subscriber" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7481d4c57092cd1c19dd541b92bdce883de840df30aa5d03fd48a3935c01842e" -dependencies = [ - "console-api 0.6.0", - "crossbeam-channel", - "crossbeam-utils", - "futures-task", - "hdrhistogram", - "humantime", - "prost-types", - "serde", - "serde_json", - "thread_local", - "tokio", - "tokio-stream", - "tonic 0.10.2", - "tracing", + "tonic", "tracing-core", - "tracing-subscriber 0.3.18", ] [[package]] @@ -1652,7 +1498,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31c4cc54bae66f7d9188996404abdf7fdfa23034ef8e43478c8810828abad758" dependencies = [ - "console-api 0.7.0", + "console-api", "crossbeam-channel", "crossbeam-utils", "futures-task", @@ -1665,7 +1511,7 @@ dependencies = [ "thread_local", "tokio", "tokio-stream", - "tonic 0.11.0", + "tonic", "tracing", "tracing-core", "tracing-subscriber 0.3.18", @@ -1705,9 +1551,9 @@ checksum = "373e9fafaa20882876db20562275ff58d50e0caa2590077fe7ce7bef90211d0d" [[package]] name = "constant_time_eq" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +checksum = 
"7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "convert_case" @@ -1753,9 +1599,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core2" @@ -1768,9 +1614,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1893,9 +1739,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" dependencies = [ "csv-core", "itoa", @@ -1932,7 +1778,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "subtle", "zeroize", ] @@ -1945,7 +1791,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1989,7 +1835,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2000,7 +1846,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2051,7 +1897,7 @@ checksum = "bc2323e10c92e1cf4d86e11538512e6dc03ceb586842970b6332af3d4046a046" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2065,27 +1911,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "der-parser" -version = "8.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" -dependencies = [ - "asn1-rs 0.5.2", - "displaydoc", - "nom", - "num-bigint", - "num-traits", - "rusticata-macros", -] - [[package]] name = "der-parser" version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", "displaydoc", "nom", "num-bigint", @@ -2116,13 +1948,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2143,7 +1975,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2153,7 +1985,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2165,8 +1997,8 @@ dependencies = [ "convert_case 0.4.0", "proc-macro2", "quote", - "rustc_version 0.4.0", - "syn 2.0.85", + "rustc_version 0.4.1", + "syn 2.0.87", ] [[package]] @@ -2186,7 +2018,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2254,7 +2086,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2290,11 +2122,6 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" -[[package]] -name = "dyn-clone" -version = "1.0.17" -source = "git+https://github.com/dtolnay/dyn-clone?tag=1.0.17#51bf8816be5a73e38b59fd4d9dda2bc18e9c2429" - [[package]] name = "ed25519" version = "2.2.3" @@ -2322,9 +2149,9 @@ dependencies = [ [[package]] name = "edit-distance" -version = "2.1.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbaaaf38131deb9ca518a274a45bfdb8771f139517b073b16c2d3d32ae5037b" +checksum = "e3f497e87b038c09a155dfd169faa5ec940d0644635555ef6bd464ac20e97397" [[package]] name = "either" @@ -2343,23 +2170,23 @@ checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2435,17 +2262,6 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" -[[package]] -name = "event-listener" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite 0.2.14", -] - [[package]] name = "event-listener" version = "5.3.1" @@ -2454,7 +2270,7 @@ checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", ] [[package]] @@ -2464,17 +2280,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ "event-listener 5.3.1", - "pin-project-lite 0.2.14", -] - -[[package]] -name = "eyre" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" -dependencies = [ - "indenter", - "once_cell", + "pin-project-lite 0.2.15", ] [[package]] @@ -2488,9 +2294,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fiat-crypto" @@ -2511,9 +2317,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide", @@ -2521,13 +2327,12 @@ dependencies = [ [[package]] name = "flume" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +checksum = 
"da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" dependencies = [ "futures-core", "futures-sink", - "nanorand", "spin 0.9.8", ] @@ -2654,21 +2459,21 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "waker-fn", ] [[package]] name = "futures-lite" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +checksum = "3f1fa2f9765705486b33fd2acf1577f8ec449c2ba1f318ae5447697b7c08d210" dependencies = [ - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-core", "futures-io", "parking", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", ] [[package]] @@ -2679,17 +2484,18 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] name = "futures-rustls" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.21.11", + "rustls 0.23.16", + "rustls-pki-types", ] [[package]] @@ -2734,7 +2540,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "pin-utils", "slab", ] @@ -2774,10 +2580,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] @@ -2792,9 +2596,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.1" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" @@ -2804,9 +2608,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-timers" -version = "0.2.6" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" dependencies = [ "futures-channel", "futures-core", @@ -2826,7 +2630,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.6", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2835,9 +2639,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -2845,7 +2649,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.2.6", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2882,15 +2686,24 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" dependencies = [ "allocator-api2", "equivalent", "foldhash", ] +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "hashlink" version = "0.9.1" @@ -2937,12 
+2750,6 @@ dependencies = [ "http 0.2.12", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -2955,6 +2762,12 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + [[package]] name = "hex" version = "0.4.3" @@ -2991,7 +2804,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "socket2 0.5.7", - "thiserror", + "thiserror 1.0.68", "tinyvec", "tokio", "tracing", @@ -3014,7 +2827,7 @@ dependencies = [ "rand 0.8.5", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -3104,9 +2917,7 @@ version = "0.5.79" dependencies = [ "anyhow", "async-broadcast", - "async-compatibility-layer", - "async-lock 2.8.0", - "async-std", + "async-lock 3.4.0", "async-trait", "bimap", "bincode", @@ -3127,7 +2938,7 @@ dependencies = [ "hotshot-task", "hotshot-task-impls", "hotshot-types", - "jf-signature", + "jf-signature 0.2.0", "libp2p-identity", "libp2p-networking", "lru 0.12.5", @@ -3138,7 +2949,7 @@ dependencies = [ "serde", "sha2 0.10.8", "surf-disco", - "thiserror", + "thiserror 2.0.3", "time 0.3.36", "tokio", "toml", @@ -3162,7 +2973,7 @@ dependencies = [ "hotshot-types", "serde", "tagged-base64", - "thiserror", + "thiserror 2.0.3", "tide-disco", "toml", "vbs", @@ -3174,9 +2985,7 @@ version = "0.5.79" dependencies = [ "anyhow", "async-broadcast", - "async-compatibility-layer", - "async-lock 2.8.0", - "async-std", + "async-lock 3.4.0", "async-trait", "bitvec", "committable", @@ -3188,12 +2997,13 @@ dependencies = [ "hotshot-task", "hotshot-task-impls", 
"hotshot-types", + "jf-vid", "rand 0.8.5", "reqwest", "serde", "sha2 0.10.8", "sha3", - "thiserror", + "thiserror 2.0.3", "time 0.3.36", "tokio", "tracing", @@ -3207,9 +3017,7 @@ version = "0.5.79" dependencies = [ "anyhow", "async-broadcast", - "async-compatibility-layer", - "async-lock 2.8.0", - "async-std", + "async-lock 3.4.0", "async-trait", "bimap", "blake3", @@ -3240,7 +3048,7 @@ dependencies = [ "serde", "sha2 0.10.8", "surf-disco", - "thiserror", + "thiserror 2.0.3", "time 0.3.36", "tokio", "toml", @@ -3256,8 +3064,7 @@ name = "hotshot-fakeapi" version = "0.5.79" dependencies = [ "anyhow", - "async-compatibility-layer", - "async-lock 2.8.0", + "async-lock 3.4.0", "async-trait", "futures", "hotshot-example-types", @@ -3265,6 +3072,7 @@ dependencies = [ "rand 0.8.5", "serde", "tide-disco", + "tokio", "toml", "tracing", "vbs", @@ -3277,7 +3085,7 @@ dependencies = [ "derive_builder", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3285,9 +3093,7 @@ name = "hotshot-orchestrator" version = "0.5.79" dependencies = [ "anyhow", - "async-compatibility-layer", - "async-lock 2.8.0", - "async-std", + "async-lock 3.4.0", "bincode", "blake3", "clap", @@ -3299,7 +3105,7 @@ dependencies = [ "serde", "serde_json", "surf-disco", - "thiserror", + "thiserror 2.0.3", "tide-disco", "tokio", "toml", @@ -3322,7 +3128,7 @@ dependencies = [ "hotshot-types", "jf-crhf", "jf-rescue", - "jf-signature", + "jf-signature 0.2.0", "jf-utils", "rand_chacha 0.3.1", "serde", @@ -3335,8 +3141,6 @@ version = "0.5.79" dependencies = [ "anyhow", "async-broadcast", - "async-compatibility-layer", - "async-std", "async-trait", "futures", "tokio", @@ -3350,9 +3154,7 @@ version = "0.5.79" dependencies = [ "anyhow", "async-broadcast", - "async-compatibility-layer", - "async-lock 2.8.0", - "async-std", + "async-lock 3.4.0", "async-trait", "bincode", "bitvec", @@ -3364,14 +3166,15 @@ dependencies = [ "hotshot-builder-api", "hotshot-task", "hotshot-types", - "jf-signature", + 
"jf-signature 0.2.0", "jf-vid", + "lru 0.12.5", "rand 0.8.5", "serde", "sha2 0.10.8", "surf-disco", "tagged-base64", - "thiserror", + "thiserror 2.0.3", "time 0.3.36", "tokio", "tracing", @@ -3387,9 +3190,7 @@ version = "0.5.79" dependencies = [ "anyhow", "async-broadcast", - "async-compatibility-layer", - "async-lock 2.8.0", - "async-std", + "async-lock 3.4.0", "async-trait", "automod", "bitvec", @@ -3407,7 +3208,7 @@ dependencies = [ "hotshot-task-impls", "hotshot-types", "itertools 0.13.0", - "jf-signature", + "jf-signature 0.2.0", "jf-vid", "lru 0.12.5", "portpicker", @@ -3417,7 +3218,7 @@ dependencies = [ "sha2 0.10.8", "sha3", "tagged-base64", - "thiserror", + "thiserror 2.0.3", "tide-disco", "tokio", "tracing", @@ -3437,9 +3238,7 @@ dependencies = [ "ark-serialize", "ark-srs", "ark-std", - "async-compatibility-layer", - "async-lock 2.8.0", - "async-std", + "async-lock 3.4.0", "async-trait", "bincode", "bitvec", @@ -3451,13 +3250,13 @@ dependencies = [ "derivative", "digest 0.10.7", "displaydoc", - "dyn-clone 1.0.17 (git+https://github.com/dtolnay/dyn-clone?tag=1.0.17)", + "dyn-clone", "either", "espresso-systems-common", "ethereum-types", "futures", "jf-pcs", - "jf-signature", + "jf-signature 0.2.0", "jf-utils", "jf-vid", "lazy_static", @@ -3473,7 +3272,7 @@ dependencies = [ "sha2 0.10.8", "surf-disco", "tagged-base64", - "thiserror", + "thiserror 2.0.3", "time 0.3.36", "tokio", "toml", @@ -3515,14 +3314,14 @@ checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http 0.2.12", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", ] [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http 1.1.0", @@ -3530,15 +3329,15 @@ dependencies = [ 
[[package]] name = "http-body-util" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", - "futures-core", + "futures-util", "http 1.1.0", - "http-body 1.0.0", - "pin-project-lite 0.2.14", + "http-body 1.0.1", + "pin-project-lite 0.2.15", ] [[package]] @@ -3566,7 +3365,7 @@ dependencies = [ "cookie", "futures-lite 1.13.0", "infer", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "rand 0.7.3", "serde", "serde_json", @@ -3577,9 +3376,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -3595,9 +3394,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", @@ -3609,7 +3408,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "socket2 0.5.7", "tokio", "tower-service", @@ -3619,19 +3418,19 @@ dependencies = [ [[package]] name = "hyper" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", 
"futures-channel", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", "itoa", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "smallvec", "tokio", "want", @@ -3639,15 +3438,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", - "rustls 0.23.7", + "rustls 0.23.16", "rustls-pki-types", "tokio", "tokio-rustls", @@ -3660,8 +3459,8 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper 0.14.29", - "pin-project-lite 0.2.14", + "hyper 0.14.31", + "pin-project-lite 0.2.15", "tokio", "tokio-io-timeout", ] @@ -3674,7 +3473,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "native-tls", "tokio", @@ -3684,36 +3483,35 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", - "hyper 1.3.1", - "pin-project-lite 0.2.14", + "http-body 1.0.1", + "hyper 1.5.0", + "pin-project-lite 0.2.15", "socket2 0.5.7", "tokio", - "tower", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -3758,6 +3556,124 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = 
"icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -3776,12 +3692,23 @@ dependencies = [ [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] @@ -3800,7 +3727,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ - "async-io 2.3.3", + "async-io 2.3.4", "core-foundation", "fnv", "futures", @@ -3808,7 +3735,6 @@ dependencies = [ "ipnet", "log", "rtnetlink", - "smol", "system-configuration 0.5.1", "tokio", "windows", @@ -3825,7 +3751,7 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.31", "log", "rand 0.8.5", "tokio", @@ -3844,29 +3770,23 @@ dependencies = [ [[package]] name = "include_dir" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +checksum = "923d117408f1e49d914f1a379a309cffe4f18c05cf4e3d12e613a15fc81bd0dd" dependencies = [ "include_dir_macros", ] [[package]] name = "include_dir_macros" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +checksum = "7cab85a7ed0bd5f0e76d93846e0147172bed2e2d3f859bcc33a8d9699cad1a75" dependencies = [ "proc-macro2", "quote", ] -[[package]] -name = "indenter" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" - [[package]] name = "indexmap" version = "1.9.3" @@ -3880,12 +3800,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = 
"707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.1", "serde", ] @@ -3919,7 +3839,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -3938,15 +3858,15 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is_terminal_polyfill" -version = "1.70.0" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" @@ -4082,7 +4002,7 @@ dependencies = [ "derivative", "displaydoc", "downcast-rs", - "dyn-clone 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "dyn-clone", "hashbrown 0.14.5", "itertools 0.12.1", "jf-utils", @@ -4144,6 +4064,35 @@ dependencies = [ "zeroize", ] +[[package]] +name = "jf-signature" +version = "0.2.0" +source = "git+https://github.com/EspressoSystems/jellyfish?tag=jf-signature-v0.2.0#ca160ce3452b560cad512b750a742a87c48c5881" +dependencies = [ + "ark-bls12-381", + "ark-bn254", + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + "blst", + "derivative", + "digest 0.10.7", + "displaydoc", + "hashbrown 0.14.5", + "itertools 0.12.1", + "jf-crhf", + "jf-relation", + "jf-rescue", + "jf-utils", + "num-bigint", + "num-traits", + "serde", + "sha3", + "tagged-base64", + "zeroize", +] + [[package]] name = "jf-utils" version = "0.4.4" @@ -4191,9 +4140,9 @@ dependencies = [ 
[[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -4246,23 +4195,17 @@ dependencies = [ "spin 0.9.8", ] -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" -version = "0.2.155" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -4270,9 +4213,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libp2p" @@ -4305,7 +4248,7 @@ dependencies = [ "multiaddr", "pin-project", "rw-stream-sink", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -4355,15 +4298,14 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.41.2" +version = "0.41.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8130a8269e65a2554d55131c770bdf4bcd94d2b8d4efb24ca23699be65066c05" +checksum = 
"a5a8920cbd8540059a01950c1e5c96ea8d89eb50c51cd366fc18bdf540a6e48f" dependencies = [ "either", "fnv", "futures", "futures-timer", - "instant", "libp2p-identity", "multiaddr", "multihash", @@ -4376,10 +4318,11 @@ dependencies = [ "rw-stream-sink", "serde", "smallvec", - "thiserror", + "thiserror 1.0.68", "tracing", "unsigned-varint 0.8.0", "void", + "web-time", ] [[package]] @@ -4388,7 +4331,6 @@ version = "0.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d17cbcf7160ff35c3e8e560de4a068fe9d6cb777ea72840e48eb76ff9576c4b6" dependencies = [ - "async-std-resolver", "async-trait", "futures", "hickory-resolver", @@ -4449,7 +4391,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec 0.3.1", "smallvec", - "thiserror", + "thiserror 1.0.68", "tracing", "void", ] @@ -4470,7 +4412,7 @@ dependencies = [ "rand 0.8.5", "serde", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.68", "tracing", "zeroize", ] @@ -4499,7 +4441,7 @@ dependencies = [ "serde", "sha2 0.10.8", "smallvec", - "thiserror", + "thiserror 1.0.68", "tracing", "uint", "void", @@ -4511,8 +4453,6 @@ version = "0.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49007d9a339b3e1d7eeebc4d67c05dbf23d300b7d091193ec2d3f26802d7faf2" dependencies = [ - "async-io 2.3.3", - "async-std", "data-encoding", "futures", "hickory-proto", @@ -4551,9 +4491,7 @@ name = "libp2p-networking" version = "0.5.79" dependencies = [ "anyhow", - "async-compatibility-layer", - "async-lock 2.8.0", - "async-std", + "async-lock 3.4.0", "async-trait", "bincode", "blake3", @@ -4563,6 +4501,7 @@ dependencies = [ "derive_builder", "either", "futures", + "hotshot-example-types", "hotshot-types", "lazy_static", "libp2p", @@ -4574,21 +4513,21 @@ dependencies = [ "serde", "serde_bytes", "serde_json", - "thiserror", + "thiserror 2.0.3", "tide", "tokio", "tokio-stream", "tracing", + "tracing-subscriber 0.3.18", "void", ] [[package]] name = "libp2p-quic" -version = "0.10.2" +version = 
"0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0375cdfee57b47b313ef1f0fdb625b78aed770d33a40cf1c294a371ff5e6666" +checksum = "c67296ad4e092e23f92aea3d2bdb6f24eab79c0929ed816dfb460ea2f4567d2b" dependencies = [ - "async-std", "bytes", "futures", "futures-timer", @@ -4597,12 +4536,12 @@ dependencies = [ "libp2p-identity", "libp2p-tls", "parking_lot", - "quinn 0.10.2", + "quinn", "rand 0.8.5", - "ring 0.16.20", - "rustls 0.21.11", + "ring 0.17.8", + "rustls 0.23.16", "socket2 0.5.7", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -4635,7 +4574,6 @@ version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" dependencies = [ - "async-std", "either", "fnv", "futures", @@ -4660,10 +4598,10 @@ version = "0.34.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5daceb9dd908417b6dfcfe8e94098bc4aac54500c282e78120b885dadc09b999" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4672,7 +4610,6 @@ version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b2460fc2748919adff99ecbc1aab296e4579e41f374fb164149bd2c9e529d4c" dependencies = [ - "async-io 1.13.0", "futures", "futures-timer", "if-watch", @@ -4686,20 +4623,20 @@ dependencies = [ [[package]] name = "libp2p-tls" -version = "0.3.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ce7e3c2e7569d685d08ec795157981722ff96e9e9f9eae75df3c29d02b07a5" +checksum = "72b7b831e55ce2aa6c354e6861a85fdd4dd0a2b97d5e276fabac0e4810a71776" dependencies = [ "futures", "futures-rustls", "libp2p-core", "libp2p-identity", "rcgen 0.11.3", - "ring 0.16.20", - "rustls 0.21.11", + "ring 0.17.8", + "rustls 0.23.16", "rustls-webpki 0.101.7", - "thiserror", - "x509-parser 0.15.1", + "thiserror 1.0.68", + "x509-parser", 
"yasna", ] @@ -4725,7 +4662,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "libc", ] @@ -4806,6 +4743,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "local-ip-address" version = "0.6.3" @@ -4814,7 +4757,7 @@ checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" dependencies = [ "libc", "neli", - "thiserror", + "thiserror 1.0.68", "windows-sys 0.59.0", ] @@ -4830,9 +4773,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" dependencies = [ "value-bag", ] @@ -4852,7 +4795,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -4916,7 +4859,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4931,9 +4874,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memoize" @@ 
-4978,9 +4921,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -4994,20 +4937,20 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "adler", + "adler2", ] [[package]] name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", @@ -5021,9 +4964,9 @@ checksum = "f2b8f3a258db515d5e91a904ce4ae3f73e091149b90cadbdb93d210bee07f63b" [[package]] name = "multiaddr" -version = "0.18.1" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" dependencies = [ "arrayref", "byteorder", @@ -5034,7 +4977,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", "url", ] @@ -5051,13 +4994,13 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.1" +version = "0.19.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" +checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" dependencies = [ "core2", "serde", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", ] [[package]] @@ -5074,15 +5017,6 @@ dependencies = [ "unsigned-varint 0.7.2", ] -[[package]] -name = "nanorand" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" -dependencies = [ - "getrandom 0.2.15", -] - [[package]] name = "native-tls" version = "0.2.12" @@ -5160,7 +5094,7 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -5174,7 +5108,7 @@ dependencies = [ "log", "netlink-packet-core", "netlink-sys", - "thiserror", + "thiserror 1.0.68", "tokio", ] @@ -5184,7 +5118,6 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" dependencies = [ - "async-io 1.13.0", "bytes", "futures", "libc", @@ -5292,7 +5225,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", ] @@ -5314,41 +5247,32 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] name = "object" -version = "0.32.2" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "oid-registry" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" -dependencies = [ - "asn1-rs 0.5.2", -] - -[[package]] -name = "oid-registry" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "opaque-debug" @@ -5358,11 +5282,11 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cfg-if", "foreign-types", "libc", @@ -5379,7 +5303,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5390,9 +5314,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", @@ -5408,12 +5332,12 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" 
[[package]] name = "ordered-multimap" -version = "0.6.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" +checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79" dependencies = [ "dlv-list", - "hashbrown 0.13.2", + "hashbrown 0.14.5", ] [[package]] @@ -5422,17 +5346,11 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "owo-colors" -version = "3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" - [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -5452,7 +5370,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.1", + "redox_syscall", "smallvec", "windows-targets 0.52.6", ] @@ -5465,9 +5383,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pathdiff" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +checksum = "d61c5ce1153ab5b689d0c074c4e7fc613e942dfb7dd9eea5ab202d2ad91fe361" [[package]] name = "pem" @@ -5496,20 +5414,20 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.10" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.68", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.10" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" +checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd" dependencies = [ "pest", "pest_generator", @@ -5517,22 +5435,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.10" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" +checksum = "eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] name = "pest_meta" -version = "2.7.10" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" +checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d" dependencies = [ "once_cell", "pest", @@ -5541,22 +5459,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" +checksum = 
"3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5567,9 +5485,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -5585,12 +5503,12 @@ checksum = "d15b6607fa632996eb8a17c9041cb6071cb75ac057abd45dece578723ea8c7c0" [[package]] name = "piper" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1d5c74c9876f070d3e8fd503d748c7d974c3e48da8f41350fa5222ef9b4391" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-io", ] @@ -5617,9 +5535,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "polling" @@ -5633,23 +5551,23 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "windows-sys 0.48.0", ] [[package]] name = "polling" -version = "3.7.1" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6a007746f34ed64099e88783b0ae369eaa3da6392868ba262e2af9b8fbaea1" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi", - "pin-project-lite 0.2.14", - "rustix 0.38.34", + 
"hermit-abi 0.4.0", + "pin-project-lite 0.2.15", + "rustix 0.38.39", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5680,18 +5598,21 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5707,11 +5628,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit", ] [[package]] @@ -5764,14 +5685,14 @@ dependencies = [ "memchr", "parking_lot", "protobuf", - "thiserror", + "thiserror 1.0.68", ] [[package]] name = "prometheus-client" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" dependencies = [ "dtoa", "itoa", @@ -5787,7 +5708,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ 
-5810,7 +5731,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5872,7 +5793,7 @@ dependencies = [ "asynchronous-codec 0.6.2", "bytes", "quick-protobuf", - "thiserror", + "thiserror 1.0.68", "unsigned-varint 0.7.2", ] @@ -5885,65 +5806,29 @@ dependencies = [ "asynchronous-codec 0.7.0", "bytes", "quick-protobuf", - "thiserror", + "thiserror 1.0.68", "unsigned-varint 0.8.0", ] [[package]] name = "quinn" -version = "0.10.2" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ - "async-io 1.13.0", - "async-std", "bytes", "futures-io", - "pin-project-lite 0.2.14", - "quinn-proto 0.10.6", - "quinn-udp 0.4.1", - "rustc-hash 1.1.0", - "rustls 0.21.11", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "quinn" -version = "0.11.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" -dependencies = [ - "bytes", - "pin-project-lite 0.2.14", - "quinn-proto 0.11.8", - "quinn-udp 0.5.4", + "pin-project-lite 0.2.15", + "quinn-proto", + "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.7", + "rustls 0.23.16", "socket2 0.5.7", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] -[[package]] -name = "quinn-proto" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" -dependencies = [ - "bytes", - "rand 0.8.5", - "ring 0.16.20", - "rustc-hash 1.1.0", - "rustls 0.21.11", - "slab", - "thiserror", - "tinyvec", - "tracing", -] - [[package]] name = "quinn-proto" version = "0.11.8" @@ -5954,37 +5839,25 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.7", + "rustls 
0.23.16", "slab", - "thiserror", + "thiserror 1.0.68", "tinyvec", "tracing", ] [[package]] name = "quinn-udp" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" -dependencies = [ - "bytes", - "libc", - "socket2 0.5.7", - "tracing", - "windows-sys 0.48.0", -] - -[[package]] -name = "quinn-udp" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2 0.5.7", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6115,7 +5988,7 @@ dependencies = [ "ring 0.17.8", "rustls-pki-types", "time 0.3.36", - "x509-parser 0.16.0", + "x509-parser", "yasna", ] @@ -6134,7 +6007,7 @@ dependencies = [ "itoa", "num-bigint", "percent-encoding", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "ryu", "tokio", "tokio-retry2", @@ -6144,43 +6017,34 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.1" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = 
"ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror", + "thiserror 1.0.68", ] [[package]] name = "regex" -version = "1.10.4" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.6", - "regex-syntax 0.8.3", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -6194,13 +6058,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.3", + "regex-syntax 0.8.5", ] [[package]] @@ -6211,9 +6075,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rend" @@ -6226,20 +6090,20 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 
1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-tls", "hyper-util", @@ -6250,13 +6114,13 @@ dependencies = [ "native-tls", "once_cell", "percent-encoding", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.1", - "system-configuration 0.6.0", + "system-configuration 0.6.1", "tokio", "tokio-native-tls", "tower-service", @@ -6309,9 +6173,9 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.44" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0" +checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" dependencies = [ "bitvec", "bytecheck", @@ -6327,9 +6191,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.44" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" +checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" dependencies = [ "proc-macro2", "quote", @@ -6343,7 +6207,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.7", - "bitflags 2.5.0", + "bitflags 2.6.0", "serde", "serde_derive", ] @@ -6390,21 +6254,20 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" dependencies = [ - "async-global-executor", "futures", "log", "netlink-packet-route", "netlink-proto", "nix", - "thiserror", + "thiserror 1.0.68", "tokio", ] [[package]] name = "rust-ini" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" +checksum = 
"3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a" dependencies = [ "cfg-if", "ordered-multimap", @@ -6445,9 +6308,9 @@ dependencies = [ [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver 1.0.23", ] @@ -6477,11 +6340,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys 0.4.14", @@ -6497,65 +6360,39 @@ dependencies = [ "base64 0.13.1", "log", "ring 0.16.20", - "sct 0.6.1", + "sct", "webpki", ] [[package]] name = "rustls" -version = "0.21.11" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", - "ring 0.17.8", - "rustls-webpki 0.101.7", - "sct 0.7.1", -] - -[[package]] -name = "rustls" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" -dependencies = [ - "log", - "ring 0.17.8", - "rustls-pki-types", - "rustls-webpki 0.102.3", - "subtle", - "zeroize", -] - -[[package]] -name = "rustls" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebbbdb961df0ad3f2652da8f3fdc4b36122f568f968f45ad3316f26c025c677b" -dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - 
"rustls-webpki 0.102.3", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls-pemfile" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-webpki" @@ -6569,9 +6406,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.3" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -6580,9 +6417,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "rw-stream-sink" @@ -6603,11 +6440,11 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] 
[[package]] @@ -6632,16 +6469,6 @@ dependencies = [ "untrusted 0.7.1", ] -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", -] - [[package]] name = "seahash" version = "4.1.0" @@ -6650,11 +6477,11 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -6663,9 +6490,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -6694,9 +6521,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.213" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -6709,7 +6536,7 @@ checksum = "b3acbd21cb24261fc36f595b38d3b34d0ff4e31a6b42edd6a43387d27c5787c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -6723,13 +6550,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.213" +version = 
"1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -6761,14 +6588,14 @@ checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" dependencies = [ "percent-encoding", "serde", - "thiserror", + "thiserror 1.0.68", ] [[package]] name = "serde_spanned" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -6787,15 +6614,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.1" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.6", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", @@ -6805,14 +6632,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.1" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -6850,9 +6677,9 @@ dependencies = [ [[package]] name = "sha1_smol" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" [[package]] name = "sha2" @@ -6955,9 +6782,9 @@ dependencies = [ [[package]] name = "simdutf8" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "slab" @@ -6997,23 +6824,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "smol" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" -dependencies = [ - "async-channel 1.9.0", - "async-executor", - "async-fs", - "async-io 1.13.0", - "async-lock 2.8.0", - "async-net", - "async-process", - "blocking", - "futures-lite 1.13.0", -] - [[package]] name = "snafu" version = "0.8.5" @@ -7029,10 +6839,10 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c3c6b7927ffe7ecaa769ee0e3994da3b8cafc8f444578982c83ecb161af917" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -7082,20 +6892,19 @@ dependencies = [ [[package]] name = "sqlformat" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" +checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" dependencies = [ - "itertools 0.12.1", "nom", "unicode_categories", ] [[package]] name = "sqlx" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfa89bea9500db4a0d038513d7a060566bfc51d46d1c014847049a45cce85e8" +checksum = 
"93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e" dependencies = [ "sqlx-core", "sqlx-macros", @@ -7106,9 +6915,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06e2f2bd861719b1f3f0c7dbe1d80c30bf59e76cf019f07d9014ed7eefb8e08" +checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e" dependencies = [ "atoi", "byteorder", @@ -7123,9 +6932,9 @@ dependencies = [ "futures-io", "futures-util", "hashbrown 0.14.5", - "hashlink", + "hashlink 0.9.1", "hex", - "indexmap 2.2.6", + "indexmap 2.6.0", "log", "memchr", "once_cell", @@ -7136,7 +6945,7 @@ dependencies = [ "sha2 0.10.8", "smallvec", "sqlformat", - "thiserror", + "thiserror 1.0.68", "time 0.3.36", "tokio", "tokio-stream", @@ -7146,26 +6955,26 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" +checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" dependencies = [ "proc-macro2", "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] name = "sqlx-macros-core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d100558134176a2629d46cec0c8891ba0be8910f7896abfdb75ef4ab6f4e7ce" +checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5" dependencies = [ "dotenvy", "either", - "heck 0.5.0", + "heck", "hex", "once_cell", "proc-macro2", @@ -7177,7 +6986,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.85", + "syn 2.0.87", "tempfile", "tokio", "url", @@ -7185,13 +6994,13 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "936cac0ab331b14cb3921c62156d913e4c15b74fb6ec0f3146bd4ef6e4fb3c12" +checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a" dependencies = [ "atoi", "base64 0.22.1", - "bitflags 2.5.0", + "bitflags 2.6.0", "byteorder", "bytes", "crc", @@ -7220,7 +7029,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror", + "thiserror 1.0.68", "time 0.3.36", "tracing", "whoami", @@ -7228,13 +7037,13 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9734dbce698c67ecf67c442f768a5e90a49b2a4d61a9f1d59f73874bd4cf0710" +checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8" dependencies = [ "atoi", "base64 0.22.1", - "bitflags 2.5.0", + "bitflags 2.6.0", "byteorder", "crc", "dotenvy", @@ -7259,7 +7068,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror", + "thiserror 1.0.68", "time 0.3.36", "tracing", "whoami", @@ -7267,9 +7076,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75b419c3c1b1697833dd927bdc4c6545a620bc1bbafabd44e1efbe9afcd337e" +checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680" dependencies = [ "atoi", "flume", @@ -7289,6 +7098,12 @@ dependencies = [ "url", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "standback" version = "0.2.17" @@ -7372,9 +7187,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" [[package]] name = "strum_macros" @@ -7382,18 +7197,18 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "rustversion", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "surf-disco" @@ -7416,15 +7231,15 @@ dependencies = [ [[package]] name = "sval" -version = "2.13.0" +version = "2.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53eb957fbc79a55306d5d25d87daf3627bc3800681491cda0709eef36c748bfe" +checksum = "f6dc0f9830c49db20e73273ffae9b5240f63c42e515af1da1fceefb69fceafd8" [[package]] name = "sval_buffer" -version = "2.13.0" +version = "2.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96e860aef60e9cbf37888d4953a13445abf523c534640d1f6174d310917c410d" +checksum = "429922f7ad43c0ef8fd7309e14d750e38899e32eb7e8da656ea169dd28ee212f" dependencies = [ "sval", "sval_ref", @@ -7432,18 +7247,18 @@ dependencies = [ [[package]] name = "sval_dynamic" -version = "2.13.0" +version = "2.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea3f2b07929a1127d204ed7cb3905049381708245727680e9139dac317ed556f" +checksum = "68f16ff5d839396c11a30019b659b0976348f3803db0626f736764c473b50ff4" dependencies = [ "sval", ] [[package]] name = "sval_fmt" -version = "2.13.0" +version = "2.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c4e188677497de274a1367c4bda15bd2296de4070d91729aac8f0a09c1abf64d" +checksum = "c01c27a80b6151b0557f9ccbe89c11db571dc5f68113690c1e028d7e974bae94" dependencies = [ "itoa", "ryu", @@ -7452,9 +7267,9 @@ dependencies = [ [[package]] name = "sval_json" -version = "2.13.0" +version = "2.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f456c07dae652744781f2245d5e3b78e6a9ebad70790ac11eb15dbdbce5282" +checksum = "0deef63c70da622b2a8069d8600cf4b05396459e665862e7bdb290fd6cf3f155" dependencies = [ "itoa", "ryu", @@ -7463,9 +7278,9 @@ dependencies = [ [[package]] name = "sval_nested" -version = "2.13.0" +version = "2.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "886feb24709f0476baaebbf9ac10671a50163caa7e439d7a7beb7f6d81d0a6fb" +checksum = "a39ce5976ae1feb814c35d290cf7cf8cd4f045782fe1548d6bc32e21f6156e9f" dependencies = [ "sval", "sval_buffer", @@ -7474,18 +7289,18 @@ dependencies = [ [[package]] name = "sval_ref" -version = "2.13.0" +version = "2.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be2e7fc517d778f44f8cb64140afa36010999565528d48985f55e64d45f369ce" +checksum = "bb7c6ee3751795a728bc9316a092023529ffea1783499afbc5c66f5fabebb1fa" dependencies = [ "sval", ] [[package]] name = "sval_serde" -version = "2.13.0" +version = "2.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79bf66549a997ff35cd2114a27ac4b0c2843280f2cfa84b240d169ecaa0add46" +checksum = "2a5572d0321b68109a343634e3a5d576bf131b82180c6c442dee06349dfc652a" dependencies = [ "serde", "sval", @@ -7505,9 +7320,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.85" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", @@ -7549,7 +7364,7 @@ 
checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -7565,11 +7380,11 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bc6ee10a9b4fcf576e9b0819d95ec16f4d2c02d39fd83ac1c8789785c4a42" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "core-foundation", "system-configuration-sys 0.6.0", ] @@ -7628,34 +7443,55 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.10.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", - "fastrand 2.1.0", - "rustix 0.38.34", - "windows-sys 0.52.0", + "fastrand 2.1.1", + "once_cell", + "rustix 0.38.39", + "windows-sys 0.59.0", ] [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" +dependencies = [ + "thiserror-impl 1.0.68", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" dependencies = [ - 
"thiserror-impl", + "proc-macro2", + "quote", + "syn 2.0.87", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -7692,7 +7528,7 @@ dependencies = [ "http-types", "kv-log-macro", "log", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "route-recognizer", "serde", "serde_json", @@ -7849,11 +7685,21 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -7866,17 +7712,16 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", "libc", "mio", "parking_lot", - "pin-project-lite 0.2.14", - "signal-hook-registry", + "pin-project-lite 0.2.15", "socket2 0.5.7", "tokio-macros", "tracing", @@ -7889,7 +7734,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "tokio", ] @@ -7901,7 +7746,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -7931,7 +7776,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.7", + "rustls 0.23.16", "rustls-pki-types", "tokio", ] @@ -7943,93 +7788,55 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "tokio", ] [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", "futures-sink", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "tokio", ] [[package]] name = "toml" -version = "0.8.15" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac2caab0bf757388c6c0ae23b3293fdb463fee59434529014f85e3263b995c28" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.16", + "toml_edit", ] [[package]] name = "toml_datetime" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies 
= [ "serde", ] [[package]] name = "toml_edit" -version = "0.21.1" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.2.6", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.22.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "278f3d518e152219c994ce877758516bca5e118eaed6996192a774fb9fbf0788" -dependencies = [ - "indexmap 2.2.6", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.11", -] - -[[package]] -name = "tonic" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" -dependencies = [ - "async-stream", - "async-trait", - "axum", - "base64 0.21.7", - "bytes", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.29", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "tokio", - "tokio-stream", - "tower", - "tower-layer", - "tower-service", - "tracing", + "winnow", ] [[package]] @@ -8046,7 +7853,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.31", "hyper-timeout", "percent-encoding", "pin-project", @@ -8069,7 +7876,7 @@ dependencies = [ "futures-util", "indexmap 1.9.3", "pin-project", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "rand 0.8.5", "slab", "tokio", @@ -8081,15 +7888,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" 
+version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -8098,7 +7905,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", - "pin-project-lite 0.2.14", + "pin-project-lite 0.2.15", "tracing-attributes", "tracing-core", ] @@ -8111,7 +7918,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -8136,16 +7943,6 @@ dependencies = [ "tracing-subscriber 0.3.18", ] -[[package]] -name = "tracing-error" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" -dependencies = [ - "tracing", - "tracing-subscriber 0.3.18", -] - [[package]] name = "tracing-futures" version = "0.2.5" @@ -8229,7 +8026,7 @@ dependencies = [ "native-tls", "rand 0.8.5", "sha-1", - "thiserror", + "thiserror 1.0.68", "url", "utf-8", ] @@ -8246,9 +8043,9 @@ dependencies = [ [[package]] name = "typeid" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "059d83cc991e7a42fc37bd50941885db0888e34209f8cfd9aab07ddec03bc9cf" +checksum = "0e13db2e0ccd5e14a544e8a246ba2312cd25223f616442d7f2cb0e3db614236e" [[package]] name = "typenum" @@ -8258,9 +8055,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = 
"2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uint" @@ -8276,51 +8073,48 @@ dependencies = [ [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-properties" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4259d9d4425d9f0661581b804cb85fe66a4c631cadd8f490d1c13a35d5d9291" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = 
"f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-xid" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "unicode_categories" @@ -8368,29 +8162,28 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.9.6" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f214ce18d8b2cbe84ed3aa6486ed3f5b285cf8d8fbdbce9f3f767a724adc35" +checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "flate2", "log", "once_cell", - "rustls 0.22.4", + "rustls 0.23.16", "rustls-pki-types", - "rustls-webpki 0.102.3", "url", - "webpki-roots 0.26.2", + "webpki-roots 0.26.6", ] [[package]] name = "url" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna 1.0.3", "percent-encoding", "serde", ] @@ -8401,11 +8194,23 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + 
[[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "utils" @@ -8416,9 +8221,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.8.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" [[package]] name = "valuable" @@ -8428,9 +8233,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" +checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" dependencies = [ "value-bag-serde1", "value-bag-sval2", @@ -8438,9 +8243,9 @@ dependencies = [ [[package]] name = "value-bag-serde1" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccacf50c5cb077a9abb723c5bcb5e0754c1a433f1e1de89edc328e2760b6328b" +checksum = "4bb773bd36fd59c7ca6e336c94454d9c66386416734817927ac93d81cb3c5b0b" dependencies = [ "erased-serde", "serde", @@ -8449,9 +8254,9 @@ dependencies = [ [[package]] name = "value-bag-sval2" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1785bae486022dfb9703915d42287dcb284c1ee37bd1080eeba78cc04721285b" +checksum = "53a916a702cac43a88694c97657d449775667bcd14b70419441d05b7fea4a83a" dependencies = [ "sval", "sval_buffer", @@ -8491,9 +8296,9 @@ dependencies = [ [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "void" @@ -8527,7 +8332,7 @@ dependencies = [ "futures-util", "headers", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.31", "log", "mime", "mime_guess", @@ -8563,11 +8368,12 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", + "once_cell", "serde", "serde_json", "wasm-bindgen-macro", @@ -8575,24 +8381,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", @@ -8602,9 +8408,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = 
"e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8612,28 +8418,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", @@ -8660,32 +8476,20 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.2" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c452ad30530b54a4d8e71952716a212b08efd0f3562baa66c29a618b07da7c3" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.34", -] - [[package]] name = "whoami" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.4.1", + "redox_syscall", "wasite", ] @@ -8723,7 +8527,7 @@ version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ - "windows-core", + "windows-core 0.51.1", "windows-targets 0.48.5", ] @@ -8736,6 +8540,15 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-registry" version = "0.2.0" @@ -8916,18 +8729,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - -[[package]] -name = "winnow" -version = "0.6.11" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c52728401e1dc672a56e81e593e912aa54c78f40246869f78359a2bf24d29d" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -8942,6 +8746,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "wyz" version = "0.5.1" @@ -8951,46 +8767,29 @@ dependencies = [ "tap", ] -[[package]] -name = "x509-parser" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" -dependencies = [ - "asn1-rs 0.5.2", - "data-encoding", - "der-parser 8.2.0", - "lazy_static", - "nom", - "oid-registry 0.6.1", - "rusticata-macros", - "thiserror", - "time 0.3.36", -] - [[package]] name = "x509-parser" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", "data-encoding", - "der-parser 9.0.0", + "der-parser", "lazy_static", "nom", - "oid-registry 0.7.0", + "oid-registry", "ring 0.17.8", "rusticata-macros", - "thiserror", + "thiserror 1.0.68", "time 0.3.36", ] [[package]] name = "xml-rs" -version = "0.8.20" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" +checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" [[package]] name = "xmltree" @@ -9002,12 +8801,14 @@ dependencies = [ ] [[package]] -name = "yaml-rust" -version = "0.4.5" +name = "yaml-rust2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +checksum = "8902160c4e6f2fb145dbe9d6760a75e3c9522d8bf796ed7047c85919ac7115f8" dependencies = [ - "linked-hash-map", + "arraydeque", + "encoding_rs", + "hashlink 0.8.4", ] [[package]] @@ -9019,24 
+8820,70 @@ dependencies = [ "time 0.3.36", ] +[[package]] +name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure 0.13.1", +] + [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure 0.13.1", ] [[package]] @@ -9056,5 +8903,27 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies 
= [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", ] diff --git a/Cargo.toml b/Cargo.toml index ce276fd907..73a80ba2e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,20 +12,21 @@ repository = "https://github.com/EspressoSystems/HotShot" # when implementing traits externally [workspace] members = [ + "crates/builder-api", + "crates/example-types", + "crates/examples", + "crates/fakeapi", "crates/hotshot", "crates/hotshot-stake-table", "crates/libp2p-networking", "crates/macros", + "crates/orchestrator" +, "crates/task", "crates/task-impls", "crates/testing", - "crates/examples", - "crates/example-types", "crates/types", - "crates/builder-api", - "crates/fakeapi", - "crates/utils", -] + "crates/utils"] resolver = "2" [workspace.dependencies] @@ -35,11 +36,8 @@ ark-ff = "0.4" ark-serialize = "0.4" ark-std = { version = "0.4", default-features = false } async-broadcast = "0.7" -async-compatibility-layer = { version = "1.2.1", default-features = false, features = [ - "logging-utils", -] } -async-lock = "2" -async-std = { version = "1", features = ["attributes"] } +tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } +async-lock = "3" async-trait = "0.1" bincode = "1" bitvec = { version = "1", default-features = false, features = [ @@ -61,7 +59,7 @@ derive_more = { version = "1.0", features = ["from", "deref"] } futures = { version = "0.3", default-features = false } jf-crhf = { version = "0.1.0", git = 
"https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } jf-vid = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } -jf-signature = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } +jf-signature = { git = "https://github.com/EspressoSystems/jellyfish", tag = "jf-signature-v0.2.0" } jf-rescue = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } jf-pcs = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } jf-utils = { version = "0.4.4", git = "https://github.com/espressosystems/jellyfish", tag = "0.4.5" } @@ -77,7 +75,7 @@ serde = { version = "1", features = ["derive"] } serde_bytes = { version = "0.11" } serde_json = { version = "1.0" } sha2 = "0.10" -thiserror = "1" +thiserror = "2" surf-disco = "0.9" tagged-base64 = "0.4" tide-disco = "0.9" @@ -116,18 +114,12 @@ tokio = { version = "1", default-features = false, features = [ ] } anyhow = "1" - # Push CDN imports cdn-client = { git = "https://github.com/EspressoSystems/Push-CDN", tag = "0.5.3" } cdn-broker = { git = "https://github.com/EspressoSystems/Push-CDN", tag = "0.5.3" } cdn-marshal = { git = "https://github.com/EspressoSystems/Push-CDN", tag = "0.5.3" } cdn-proto = { git = "https://github.com/EspressoSystems/Push-CDN", tag = "0.5.3" } - -### Profiles -### -### Note: these only apply to example executables or tests built from within this crate. They have -### no effect on crates that depend on this crate. - +### Profiles###### Note: these only apply to example executables or tests built from within this crate. They have### no effect on crates that depend on this crate. 
## Apply some optimizations to test dependencies in debug/test builds [profile.dev] @@ -178,4 +170,4 @@ unexpected_cfgs = { level = "warn", check-cfg = [ 'cfg(async_executor_impl, values("async-std"))', 'cfg(async_executor_impl, values("tokio"))', 'cfg(hotshot_example)', -] } \ No newline at end of file +] } diff --git a/README.md b/README.md index ecb179469d..c641255b0f 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ scoop install protobuf cmake Once dependencies have been installed, to build everything: ```sh -just async_std build +just build ``` @@ -74,7 +74,7 @@ HotShot supports static linking for its examples: # Nix-shell is optional but recommended nix develop .#staticShell -just async_std build +just build ``` # Testing @@ -82,7 +82,7 @@ just async_std build To test: ```sh -RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just async_std test +RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just test ``` - `RUST_LOG=$ERROR_LOG_LEVEL`: The basic levels of logging include `warn`, `error`, `info`. @@ -92,7 +92,7 @@ RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just async_std test To stress test, run the ignored tests prefixed with `test_stress`: ```sh -RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just async_std run_test test_stress +RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just run_test test_stress ``` ## Careful @@ -101,7 +101,7 @@ To double check for UB: ```bash nix develop .#correctnessShell -just async_std careful +just careful ``` ## Testing on CI @@ -109,7 +109,6 @@ just async_std careful To test as if running on CI, one must limit the number of cores and ram to match github runners (2 core, 7 gig ram). To limit the ram, spin up a virtual machine or container with 7 gigs ram. 
To limit the core count when running tests: ``` -ASYNC_STD_THREAD_COUNT=1 RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just async_std test ASYNC_STD_THREAD_COUNT=1 RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just tokio test ``` @@ -202,7 +201,6 @@ For espresso developers we have written up a description of our workflow [here]( Choose an async runtime to use before launching a text editor. This may be done by setting the environment RUSTFLAGS. For example: ``` -export RUSTFLAGS='--cfg async_executor_impl="tokio" --cfg async_channel_impl="tokio"' # export RUSTFLAGS so the editor is aware of extra flags nvim # launch text editor of choice. We choose neovim in this example unset RUSTFLAGS # Unset rustflags so we may continue to use the justfile. The justfile sets these particular config options ``` diff --git a/crates/builder-api/Cargo.toml b/crates/builder-api/Cargo.toml index 45fc2e89be..1e129d7f13 100644 --- a/crates/builder-api/Cargo.toml +++ b/crates/builder-api/Cargo.toml @@ -2,19 +2,18 @@ name = "hotshot-builder-api" version = "0.1.7" edition = "2021" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] async-trait = { workspace = true } clap = { workspace = true } +committable = { workspace = true } derive_more = { workspace = true } futures = { workspace = true } hotshot-types = { path = "../types" } serde = { workspace = true } -thiserror = { workspace = true } tagged-base64 = { workspace = true } +thiserror = { workspace = true } tide-disco = { workspace = true } toml = { workspace = true } -committable = { workspace = true } vbs = { workspace = true } diff --git a/crates/builder-api/api/v0_1/builder.toml b/crates/builder-api/api/v0_1/builder.toml index f16911a948..7e7ad9d853 100644 --- a/crates/builder-api/api/v0_1/builder.toml +++ b/crates/builder-api/api/v0_1/builder.toml @@ -59,6 +59,19 @@ Get the specified block candidate. 
Returns application-specific encoded transactions type """ +[route.claim_block_with_num_nodes] +PATH = ["claimblockwithnumnodes/:block_hash/:view_number/:sender/:signature/:num_nodes"] +":block_hash" = "TaggedBase64" +":view_number" = "Integer" +":sender" = "TaggedBase64" +":signature" = "TaggedBase64" +":num_nodes" = "Integer" +DOC = """ +Get the specified block candidate and provide the number of nodes. + +Returns application-specific encoded transactions type +""" + [route.claim_header_input] PATH = ["claimheaderinput/:block_hash/:view_number/:sender/:signature"] ":block_hash" = "TaggedBase64" diff --git a/crates/builder-api/api/v0_1/submit.toml b/crates/builder-api/api/v0_1/submit.toml index 929ec45854..71894f77be 100644 --- a/crates/builder-api/api/v0_1/submit.toml +++ b/crates/builder-api/api/v0_1/submit.toml @@ -44,3 +44,13 @@ Submit a list of transactions to builder's private mempool." Returns the corresponding list of transaction hashes """ + +[route.get_status] +PATH = ["status/:transaction_hash"] +METHOD = "GET" +":transaction_hash" = "TaggedBase64" +DOC = """ +Get the transaction's status. + +Returns "pending", "sequenced" or "rejected" with error. 
+""" \ No newline at end of file diff --git a/crates/builder-api/src/v0_1/builder.rs b/crates/builder-api/src/v0_1/builder.rs index 0e1066a8ba..235e7c8606 100644 --- a/crates/builder-api/src/v0_1/builder.rs +++ b/crates/builder-api/src/v0_1/builder.rs @@ -50,6 +50,15 @@ pub enum BuildError { Error(String), } +/// Enum to keep track on status of a transaction +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] +pub enum TransactionStatus { + Pending, + Sequenced { leaf: u64 }, + Rejected { reason: String }, // Rejection reason is in the String format + Unknown, +} + #[derive(Clone, Debug, Error, Deserialize, Serialize)] pub enum Error { #[error("Error processing request: {0}")] @@ -70,6 +79,8 @@ pub enum Error { TxnSubmit(BuildError), #[error("Error getting builder address: {0}")] BuilderAddress(#[from] BuildError), + #[error("Error getting transaction status: {0}")] + TxnStat(BuildError), #[error("Custom error {status}: {message}")] Custom { message: String, status: StatusCode }, } @@ -95,6 +106,7 @@ impl tide_disco::error::Error for Error { Error::TxnSubmit { .. } => StatusCode::INTERNAL_SERVER_ERROR, Error::Custom { .. } => StatusCode::INTERNAL_SERVER_ERROR, Error::BuilderAddress { .. } => StatusCode::INTERNAL_SERVER_ERROR, + Error::TxnStat { .. } => StatusCode::INTERNAL_SERVER_ERROR, } } } @@ -158,6 +170,29 @@ where } .boxed() })? + .get("claim_block_with_num_nodes", |req, state| { + async move { + let block_hash: BuilderCommitment = req.blob_param("block_hash")?; + let view_number = req.integer_param("view_number")?; + let signature = try_extract_param(&req, "signature")?; + let sender = try_extract_param(&req, "sender")?; + let num_nodes = req.integer_param("num_nodes")?; + state + .claim_block_with_num_nodes( + &block_hash, + view_number, + sender, + &signature, + num_nodes, + ) + .await + .map_err(|source| Error::BlockClaim { + source, + resource: block_hash.to_string(), + }) + } + .boxed() + })? 
.get("claim_header_input", |req, state| { async move { let block_hash: BuilderCommitment = req.blob_param("block_hash")?; @@ -216,6 +251,17 @@ where Ok(hashes) } .boxed() + })? + .at("get_status", |req: RequestParams, state| { + async move { + let tx = req + .body_auto::<::Transaction, Ver>(Ver::instance()) + .map_err(Error::TxnUnpack)?; + let hash = tx.commit(); + state.txn_status(hash).await.map_err(Error::TxnStat)?; + Ok(hash) + } + .boxed() })?; Ok(api) } diff --git a/crates/builder-api/src/v0_1/data_source.rs b/crates/builder-api/src/v0_1/data_source.rs index c36b457623..62f3703762 100644 --- a/crates/builder-api/src/v0_1/data_source.rs +++ b/crates/builder-api/src/v0_1/data_source.rs @@ -14,7 +14,7 @@ use hotshot_types::{ use super::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, - builder::BuildError, + builder::{BuildError, TransactionStatus}, }; #[async_trait] @@ -28,7 +28,7 @@ pub trait BuilderDataSource { signature: &::PureAssembledSignatureType, ) -> Result>, BuildError>; - /// to claim a block from the list of provided available blocks + /// To claim a block from the list of provided available blocks async fn claim_block( &self, block_hash: &BuilderCommitment, @@ -37,6 +37,17 @@ pub trait BuilderDataSource { signature: &::PureAssembledSignatureType, ) -> Result, BuildError>; + /// To claim a block from the list of provided available blocks and provide the number of nodes + /// information to the builder for VID computation. 
+ async fn claim_block_with_num_nodes( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &::PureAssembledSignatureType, + num_nodes: usize, + ) -> Result, BuildError>; + /// To claim a block header input async fn claim_block_header_input( &self, @@ -59,4 +70,9 @@ where &self, txns: Vec<::Transaction>, ) -> Result::Transaction>>, BuildError>; + + async fn txn_status( + &self, + txn_hash: Commitment<::Transaction>, + ) -> Result; } diff --git a/crates/example-types/Cargo.toml b/crates/example-types/Cargo.toml index e55d756685..c9fcc541f5 100644 --- a/crates/example-types/Cargo.toml +++ b/crates/example-types/Cargo.toml @@ -12,40 +12,29 @@ slow-tests = [] gpu-vid = ["hotshot-task-impls/gpu-vid"] [dependencies] +anyhow = { workspace = true } async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } +async-lock = { workspace = true } async-trait = { workspace = true } -anyhow = { workspace = true } -sha3 = "^0.10" +bitvec = { workspace = true } committable = { workspace = true } either = { workspace = true } +ethereum-types = { workspace = true } futures = { workspace = true } hotshot = { path = "../hotshot" } -hotshot-types = { path = "../types" } -hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } hotshot-builder-api = { path = "../builder-api" } +hotshot-task = { path = "../task" } +hotshot-task-impls = { path = "../task-impls", version = "0.5.36", default-features = false } +hotshot-types = { path = "../types" } +jf-vid = { workspace = true } rand = { workspace = true } -thiserror = { workspace = true } -tracing = { workspace = true } +reqwest = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } +sha3 = "^0.10" +thiserror = { workspace = true } time = { workspace = true } -async-lock = { workspace = true } -bitvec = { workspace = true } -ethereum-types = { workspace = true } -hotshot-task = { path = "../task" } 
-vbs = { workspace = true } -url = { workspace = true } -reqwest = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = [ - 'cfg(async_executor_impl, values("async-std"))', - 'cfg(async_executor_impl, values("tokio"))', - 'cfg(hotshot_example)', -] } +tracing = { workspace = true } +url = { workspace = true } +vbs = { workspace = true } diff --git a/crates/example-types/src/block_types.rs b/crates/example-types/src/block_types.rs index 7241ac49b1..add9d76269 100644 --- a/crates/example-types/src/block_types.rs +++ b/crates/example-types/src/block_types.rs @@ -337,6 +337,7 @@ impl< builder_commitment: BuilderCommitment, metadata: >::Metadata, _builder_fee: Vec>, + _view_number: u64, _vid_common: VidCommon, _auction_results: Option, _version: Version, diff --git a/crates/example-types/src/node_types.rs b/crates/example-types/src/node_types.rs index 8884d3e7ce..80b634515c 100644 --- a/crates/example-types/src/node_types.rs +++ b/crates/example-types/src/node_types.rs @@ -159,7 +159,7 @@ impl NodeImplementation for CombinedImpl { } impl NodeImplementation for Libp2pImpl { - type Network = Libp2pNetwork; + type Network = Libp2pNetwork; type Storage = TestStorage; type AuctionResultsProvider = TestAuctionResultsProvider; } @@ -176,6 +176,8 @@ impl Versions for TestVersions { ]; type Marketplace = StaticVersion<0, 3>; + + type Epochs = StaticVersion<0, 4>; } #[derive(Clone, Debug, Copy)] @@ -190,6 +192,8 @@ impl Versions for MarketplaceUpgradeTestVersions { ]; type Marketplace = StaticVersion<0, 3>; + + type Epochs = StaticVersion<0, 4>; } #[derive(Clone, Debug, Copy)] @@ -204,6 +208,24 @@ impl Versions for MarketplaceTestVersions { ]; type Marketplace = StaticVersion<0, 3>; + + type Epochs = StaticVersion<0, 4>; +} + +#[derive(Clone, Debug, 
Copy)] +pub struct EpochsTestVersions {} + +impl Versions for EpochsTestVersions { + type Base = StaticVersion<0, 4>; + type Upgrade = StaticVersion<0, 4>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; + + type Marketplace = StaticVersion<0, 3>; + + type Epochs = StaticVersion<0, 4>; } #[cfg(test)] @@ -230,8 +252,7 @@ mod tests { } } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] /// Test that the view number affects the commitment post-marketplace async fn test_versioned_commitment_includes_view() { let upgrade_lock = UpgradeLock::new(); diff --git a/crates/example-types/src/state_types.rs b/crates/example-types/src/state_types.rs index 50c40fee73..c5fde414bf 100644 --- a/crates/example-types/src/state_types.rs +++ b/crates/example-types/src/state_types.rs @@ -107,6 +107,7 @@ impl ValidatedState for TestValidatedState { _proposed_header: &TYPES::BlockHeader, _vid_common: VidCommon, _version: Version, + _view_number: u64, ) -> Result<(Self, Self::Delta), Self::Error> { Self::run_delay_settings_from_config(&instance.delay_config).await; Ok(( diff --git a/crates/example-types/src/storage_types.rs b/crates/example-types/src/storage_types.rs index 2a093b1f02..c4be058fe4 100644 --- a/crates/example-types/src/storage_types.rs +++ b/crates/example-types/src/storage_types.rs @@ -23,8 +23,10 @@ use hotshot_types::{ storage::Storage, }, utils::View, + vid::VidSchemeType, vote::HasViewNumber, }; +use jf_vid::VidScheme; use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; @@ -40,6 +42,7 @@ pub struct TestStorageState { proposals: BTreeMap>>, high_qc: Option>, action: TYPES::View, + epoch: TYPES::Epoch, } impl Default for TestStorageState { @@ -50,6 +53,7 @@ impl Default for TestStorageState { proposals: 
BTreeMap::new(), high_qc: None, action: TYPES::View::genesis(), + epoch: TYPES::Epoch::genesis(), } } } @@ -99,6 +103,9 @@ impl TestStorage { pub async fn last_actioned_view(&self) -> TYPES::View { self.inner.read().await.action } + pub async fn last_actioned_epoch(&self) -> TYPES::Epoch { + self.inner.read().await.epoch + } } #[async_trait] @@ -117,7 +124,11 @@ impl Storage for TestStorage { Ok(()) } - async fn append_da(&self, proposal: &Proposal>) -> Result<()> { + async fn append_da( + &self, + proposal: &Proposal>, + _vid_commit: ::Commit, + ) -> Result<()> { if self.should_return_err { bail!("Failed to append VID proposal to storage"); } diff --git a/crates/example-types/src/testable_delay.rs b/crates/example-types/src/testable_delay.rs index 98bb6c7d6b..07f460eaf3 100644 --- a/crates/example-types/src/testable_delay.rs +++ b/crates/example-types/src/testable_delay.rs @@ -1,8 +1,8 @@ use std::{collections::HashMap, time::Duration}; -use async_compatibility_layer::art::async_sleep; use async_trait::async_trait; use rand::Rng; +use tokio::time::sleep; #[derive(Eq, Hash, PartialEq, Debug, Clone)] /// What type of delay we want to apply to @@ -87,13 +87,13 @@ pub trait TestableDelay { match settings.delay_option { DelayOptions::None => {} DelayOptions::Fixed => { - async_sleep(Duration::from_millis(settings.fixed_time_in_milliseconds)).await; + sleep(Duration::from_millis(settings.fixed_time_in_milliseconds)).await; } DelayOptions::Random => { let sleep_in_millis = rand::thread_rng().gen_range( settings.min_time_in_milliseconds..=settings.max_time_in_milliseconds, ); - async_sleep(Duration::from_millis(sleep_in_millis)).await; + sleep(Duration::from_millis(sleep_in_millis)).await; } } } diff --git a/crates/examples/Cargo.toml b/crates/examples/Cargo.toml index 313c6ac746..dc6434bb75 100644 --- a/crates/examples/Cargo.toml +++ b/crates/examples/Cargo.toml @@ -82,7 +82,6 @@ path = "push-cdn/whitelist-adapter.rs" [dependencies] async-broadcast = { workspace = true } 
-async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6" @@ -117,25 +116,12 @@ vec1 = { workspace = true } url = { workspace = true } tracing = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } + cdn-client = { workspace = true } cdn-broker = { workspace = true, features = ["global-permits"] } cdn-marshal = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } -cdn-client = { workspace = true, features = ["runtime-async-std"] } -cdn-broker = { workspace = true, features = [ - "runtime-async-std", - "global-permits", -] } -cdn-marshal = { workspace = true, features = [ - "runtime-async-std", - "global-permits", -] } - [dev-dependencies] clap.workspace = true toml = { workspace = true } diff --git a/crates/examples/combined/all.rs b/crates/examples/combined/all.rs index 567a5610cd..669b52e2f0 100644 --- a/crates/examples/combined/all.rs +++ b/crates/examples/combined/all.rs @@ -10,13 +10,10 @@ pub mod types; use std::path::Path; -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; use cdn_broker::{reexports::def::hook::NoMessageHook, Broker}; use cdn_marshal::Marshal; use hotshot::{ + helpers::initialize_logging, traits::implementations::{HotShotMessageHook, KeyPair, TestingDef, WrappedSignatureKey}, types::SignatureKey, }; @@ -25,6 +22,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT}; use rand::{rngs::StdRng, RngCore, SeedableRng}; +use tokio::spawn; use tracing::{error, instrument}; use crate::{ @@ -36,12 +34,11 @@ use crate::{ #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] 
-#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); let (config, orchestrator_url) = read_orchestrator_init_config::(); @@ -95,7 +92,7 @@ async fn main() { }; // Create and spawn the broker - async_spawn(async move { + spawn(async move { let broker: Broker::SignatureKey>> = Broker::new(config).await.expect("broker failed to start"); @@ -123,7 +120,7 @@ async fn main() { }; // Spawn the marshal - async_spawn(async move { + spawn(async move { let marshal: Marshal::SignatureKey>> = Marshal::new(marshal_config) .await @@ -136,7 +133,7 @@ async fn main() { }); // orchestrator - async_spawn(run_orchestrator::(OrchestratorArgs { + spawn(run_orchestrator::(OrchestratorArgs { url: orchestrator_url.clone(), config: config.clone(), })); @@ -150,7 +147,7 @@ async fn main() { let orchestrator_url = orchestrator_url.clone(); let builder_address = gen_local_address::(i); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, diff --git a/crates/examples/combined/multi-validator.rs b/crates/examples/combined/multi-validator.rs index 71ab7afd91..b721cb5c4f 100644 --- a/crates/examples/combined/multi-validator.rs +++ b/crates/examples/combined/multi-validator.rs @@ -5,13 +5,11 @@ // along with the HotShot repository. If not, see . //! 
A multi-validator using both the web server libp2p -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; +use tokio::spawn; use tracing::instrument; use crate::types::{Network, NodeImpl, ThisRun}; @@ -23,19 +21,19 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let args = MultiValidatorArgs::parse(); tracing::debug!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for node_index in 0..args.num_nodes { let args = args.clone(); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) diff --git a/crates/examples/combined/orchestrator.rs b/crates/examples/combined/orchestrator.rs index 17b6f2dec2..c3d399f489 100644 --- a/crates/examples/combined/orchestrator.rs +++ b/crates/examples/combined/orchestrator.rs @@ -8,7 +8,7 @@ /// types used for this example pub mod types; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use hotshot::helpers::initialize_logging; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; @@ -17,12 +17,12 @@ use crate::infra::{read_orchestrator_init_config, run_orchestrator, Orchestrator #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - 
setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let (config, orchestrator_url) = read_orchestrator_init_config::(); run_orchestrator::(OrchestratorArgs:: { url: orchestrator_url.clone(), diff --git a/crates/examples/combined/validator.rs b/crates/examples/combined/validator.rs index 5007181adc..fd6ff83957 100644 --- a/crates/examples/combined/validator.rs +++ b/crates/examples/combined/validator.rs @@ -6,8 +6,8 @@ //! A validator using both the web server and libp2p -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; @@ -22,12 +22,11 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); let mut args = ValidatorArgs::parse(); diff --git a/crates/examples/infra/mod.rs b/crates/examples/infra/mod.rs index b79769df6e..9e031c02fa 100755 --- a/crates/examples/infra/mod.rs +++ b/crates/examples/infra/mod.rs @@ -15,7 +15,6 @@ use std::{ time::Instant, }; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_trait::async_trait; use cdn_broker::reexports::crypto::signature::KeyPair; use chrono::Utc; @@ -258,9 +257,6 @@ pub fn read_orchestrator_init_config() -> (NetworkConfig( config_file: &str, @@ -273,11 +269,6 @@ pub fn load_config_from_file( let mut config: NetworkConfig = config_toml.into(); - // my_own_validator_config would be best to load from file, - // but its type is too complex to load so we'll generate it from seed now. 
- // Also this function is only used for orchestrator initialization now, so this value doesn't matter - config.config.my_own_validator_config = - ValidatorConfig::generated_from_seed_indexed(config.seed, config.node_index, 1, true); // initialize it with size for better assignment of peers' config config.config.known_nodes_with_stake = vec![PeerConfig::default(); config.config.num_nodes_with_stake.get() as usize]; @@ -357,6 +348,7 @@ pub trait RunDa< /// Initializes networking, returns self async fn initialize_networking( config: NetworkConfig, + validator_config: ValidatorConfig, libp2p_advertise_address: Option, ) -> Self; @@ -371,18 +363,17 @@ pub trait RunDa< .expect("Couldn't generate genesis block"); let config = self.config(); + let validator_config = self.validator_config(); // Get KeyPair for certificate Aggregation - let pk = config.config.my_own_validator_config.public_key.clone(); - let sk = config.config.my_own_validator_config.private_key.clone(); + let pk = validator_config.public_key.clone(); + let sk = validator_config.private_key.clone(); let network = self.network(); let all_nodes = if cfg!(feature = "fixed-leader-election") { let mut vec = config.config.known_nodes_with_stake.clone(); - vec.truncate(config.config.fixed_leader_for_gpuvid); - vec } else { config.config.known_nodes_with_stake.clone() @@ -602,6 +593,9 @@ pub trait RunDa< /// Returns the config for this run fn config(&self) -> NetworkConfig; + + /// Returns the validator config with private signature keys for this run. 
+ fn validator_config(&self) -> ValidatorConfig; } // Push CDN @@ -610,6 +604,8 @@ pub trait RunDa< pub struct PushCdnDaRun { /// The underlying configuration config: NetworkConfig, + /// The private validator config + validator_config: ValidatorConfig, /// The underlying network network: PushCdnNetwork, } @@ -638,20 +634,18 @@ where { async fn initialize_networking( config: NetworkConfig, + validator_config: ValidatorConfig, _libp2p_advertise_address: Option, ) -> PushCdnDaRun { - // Get our own key - let key = config.config.my_own_validator_config.clone(); - // Convert to the Push-CDN-compatible type let keypair = KeyPair { - public_key: WrappedSignatureKey(key.public_key), - private_key: key.private_key, + public_key: WrappedSignatureKey(validator_config.public_key.clone()), + private_key: validator_config.private_key.clone(), }; // See if we should be DA, subscribe to the DA topic if so let mut topics = vec![CdnTopic::Global]; - if config.config.my_own_validator_config.is_da { + if validator_config.is_da { topics.push(CdnTopic::Da); } @@ -670,7 +664,11 @@ where // Wait for the network to be ready network.wait_for_ready().await; - PushCdnDaRun { config, network } + PushCdnDaRun { + config, + validator_config, + network, + } } fn network(&self) -> PushCdnNetwork { @@ -680,6 +678,10 @@ where fn config(&self) -> NetworkConfig { self.config.clone() } + + fn validator_config(&self) -> ValidatorConfig { + self.validator_config.clone() + } } // Libp2p @@ -688,8 +690,10 @@ where pub struct Libp2pDaRun { /// The underlying network configuration config: NetworkConfig, + /// The private validator config + validator_config: ValidatorConfig, /// The underlying network - network: Libp2pNetwork, + network: Libp2pNetwork, } #[async_trait] @@ -702,12 +706,12 @@ impl< >, NODE: NodeImplementation< TYPES, - Network = Libp2pNetwork, + Network = Libp2pNetwork, Storage = TestStorage, AuctionResultsProvider = TestAuctionResultsProvider, >, V: Versions, - > RunDa, NODE, V> for 
Libp2pDaRun + > RunDa, NODE, V> for Libp2pDaRun where ::ValidatedState: TestableState, ::BlockPayload: TestableBlock, @@ -716,12 +720,12 @@ where { async fn initialize_networking( config: NetworkConfig, + validator_config: ValidatorConfig, libp2p_advertise_address: Option, ) -> Libp2pDaRun { // Extrapolate keys for ease of use - let keys = config.clone().config.my_own_validator_config; - let public_key = keys.public_key; - let private_key = keys.private_key; + let public_key = &validator_config.public_key; + let private_key = &validator_config.private_key; // In an example, we can calculate the libp2p bind address as a function // of the advertise address. @@ -746,6 +750,10 @@ where .to_string() }; + // Create the qurorum membership from the list of known nodes + let all_nodes = config.config.known_nodes_with_stake.clone(); + let quorum_membership = TYPES::Membership::new(all_nodes.clone(), all_nodes, Topic::Global); + // Derive the bind address let bind_address = derive_libp2p_multiaddr(&bind_address).expect("failed to derive bind address"); @@ -753,11 +761,12 @@ where // Create the Libp2p network let libp2p_network = Libp2pNetwork::from_config( config.clone(), + quorum_membership, GossipConfig::default(), RequestResponseConfig::default(), bind_address, - &public_key, - &private_key, + public_key, + private_key, Libp2pMetricsValue::default(), ) .await @@ -768,17 +777,22 @@ where Libp2pDaRun { config, + validator_config, network: libp2p_network, } } - fn network(&self) -> Libp2pNetwork { + fn network(&self) -> Libp2pNetwork { self.network.clone() } fn config(&self) -> NetworkConfig { self.config.clone() } + + fn validator_config(&self) -> ValidatorConfig { + self.validator_config.clone() + } } // Combined network @@ -787,6 +801,8 @@ where pub struct CombinedDaRun { /// The underlying network configuration config: NetworkConfig, + /// The private validator config + validator_config: ValidatorConfig, /// The underlying network network: CombinedNetworks, } @@ -815,27 
+831,34 @@ where { async fn initialize_networking( config: NetworkConfig, + validator_config: ValidatorConfig, libp2p_advertise_address: Option, ) -> CombinedDaRun { // Initialize our Libp2p network - let libp2p_network: Libp2pDaRun = - as RunDa< - TYPES, - Libp2pNetwork, - Libp2pImpl, - V, - >>::initialize_networking(config.clone(), libp2p_advertise_address.clone()) - .await; + let libp2p_network: Libp2pDaRun = as RunDa< + TYPES, + Libp2pNetwork, + Libp2pImpl, + V, + >>::initialize_networking( + config.clone(), + validator_config.clone(), + libp2p_advertise_address.clone(), + ) + .await; // Initialize our CDN network - let cdn_network: PushCdnDaRun = - as RunDa< - TYPES, - PushCdnNetwork, - PushCdnImpl, - V, - >>::initialize_networking(config.clone(), libp2p_advertise_address) - .await; + let cdn_network: PushCdnDaRun = as RunDa< + TYPES, + PushCdnNetwork, + PushCdnImpl, + V, + >>::initialize_networking( + config.clone(), + validator_config.clone(), + libp2p_advertise_address, + ) + .await; // Create our combined network config let delay_duration = config @@ -848,7 +871,11 @@ where CombinedNetworks::new(cdn_network.network, libp2p_network.network, delay_duration); // Return the run configuration - CombinedDaRun { config, network } + CombinedDaRun { + config, + validator_config, + network, + } } fn network(&self) -> CombinedNetworks { @@ -858,6 +885,10 @@ where fn config(&self) -> NetworkConfig { self.config.clone() } + + fn validator_config(&self) -> ValidatorConfig { + self.validator_config.clone() + } } /// Main entry point for validators @@ -885,32 +916,30 @@ pub async fn main_entry_point< ::BlockPayload: TestableBlock, Leaf: TestableLeaf, { - setup_logging(); - setup_backtrace(); + // Initialize logging + hotshot::helpers::initialize_logging(); info!("Starting validator"); let orchestrator_client: OrchestratorClient = OrchestratorClient::new(args.url.clone()); // We assume one node will not call this twice to generate two validator_config-s with same identity. 
- let my_own_validator_config = - NetworkConfig::::generate_init_validator_config( - orchestrator_client - .get_node_index_for_init_validator_config() - .await, - // we assign nodes to the DA committee by default - true, - ); + let validator_config = NetworkConfig::::generate_init_validator_config( + orchestrator_client + .get_node_index_for_init_validator_config() + .await, + // we assign nodes to the DA committee by default + true, + ); // Derives our Libp2p private key from our private key, and then returns the public key of that key let libp2p_public_key = - derive_libp2p_peer_id::(&my_own_validator_config.private_key) + derive_libp2p_peer_id::(&validator_config.private_key) .expect("failed to derive Libp2p keypair"); // We need this to be able to register our node let peer_config = - PeerConfig::::to_bytes(&my_own_validator_config.public_config()) - .clone(); + PeerConfig::::to_bytes(&validator_config.public_config()).clone(); // Derive the advertise multiaddress from the supplied string let advertise_multiaddress = args.advertise_address.clone().map(|advertise_address| { @@ -924,16 +953,22 @@ pub async fn main_entry_point< // This function will be taken solely by sequencer right after OrchestratorClient::new, // which means the previous `generate_validator_config_when_init` will not be taken by sequencer, it's only for key pair generation for testing in hotshot. 
- let (mut run_config, source) = get_complete_config( + let (mut run_config, validator_config, source) = get_complete_config( &orchestrator_client, - my_own_validator_config, + validator_config, advertise_multiaddress, Some(libp2p_public_key), ) .await .expect("failed to get config"); - let builder_task = initialize_builder(&mut run_config, &args, &orchestrator_client).await; + let builder_task = initialize_builder( + &mut run_config, + &validator_config, + &args, + &orchestrator_client, + ) + .await; run_config.config.builder_urls = orchestrator_client .get_builder_addresses() @@ -953,7 +988,9 @@ pub async fn main_entry_point< ); info!("Initializing networking"); - let run = RUNDA::initialize_networking(run_config.clone(), args.advertise_address).await; + let run = + RUNDA::initialize_networking(run_config.clone(), validator_config, args.advertise_address) + .await; let hotshot = run.initialize_state_and_hotshot().await; if let Some(task) = builder_task { @@ -1014,6 +1051,7 @@ async fn initialize_builder< >, >( run_config: &mut NetworkConfig<::SignatureKey>, + validator_config: &ValidatorConfig<::SignatureKey>, args: &ValidatorArgs, orchestrator_client: &OrchestratorClient, ) -> Option>> @@ -1022,7 +1060,7 @@ where ::BlockPayload: TestableBlock, Leaf: TestableLeaf, { - if !run_config.config.my_own_validator_config.is_da { + if !validator_config.is_da { return None; } diff --git a/crates/examples/libp2p/all.rs b/crates/examples/libp2p/all.rs index 7cf2101d9f..4fd99cd0e8 100644 --- a/crates/examples/libp2p/all.rs +++ b/crates/examples/libp2p/all.rs @@ -8,13 +8,11 @@ /// types used for this example pub mod types; -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use infra::{gen_local_address, BUILDER_BASE_PORT, VALIDATOR_BASE_PORT}; +use 
tokio::spawn; use tracing::instrument; use crate::{ @@ -26,18 +24,17 @@ use crate::{ #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); // use configfile args let (config, orchestrator_url) = read_orchestrator_init_config::(); // orchestrator - async_spawn(run_orchestrator::(OrchestratorArgs { + spawn(run_orchestrator::(OrchestratorArgs { url: orchestrator_url.clone(), config: config.clone(), })); @@ -50,7 +47,7 @@ async fn main() { let advertise_address = gen_local_address::(i); let builder_address = gen_local_address::(i); let orchestrator_url = orchestrator_url.clone(); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, diff --git a/crates/examples/libp2p/multi-validator.rs b/crates/examples/libp2p/multi-validator.rs index 42621d6946..0767245c3b 100644 --- a/crates/examples/libp2p/multi-validator.rs +++ b/crates/examples/libp2p/multi-validator.rs @@ -5,13 +5,11 @@ // along with the HotShot repository. If not, see . //! 
A multi-validator using libp2p -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; +use tokio::spawn; use tracing::instrument; use crate::types::{Network, NodeImpl, ThisRun}; @@ -23,19 +21,19 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let args = MultiValidatorArgs::parse(); tracing::debug!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for node_index in 0..args.num_nodes { let args = args.clone(); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) diff --git a/crates/examples/libp2p/types.rs b/crates/examples/libp2p/types.rs index afcfa236a1..ed8fbcda6f 100644 --- a/crates/examples/libp2p/types.rs +++ b/crates/examples/libp2p/types.rs @@ -11,7 +11,7 @@ use hotshot_example_types::{ auction_results_provider_types::TestAuctionResultsProvider, state_types::TestTypes, storage_types::TestStorage, }; -use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use crate::infra::Libp2pDaRun; @@ -21,7 +21,7 @@ use crate::infra::Libp2pDaRun; pub struct NodeImpl {} /// Convenience type alias -pub type Network = Libp2pNetwork<::SignatureKey>; +pub type Network = Libp2pNetwork; impl NodeImplementation for NodeImpl { type Network = Network; diff --git 
a/crates/examples/libp2p/validator.rs b/crates/examples/libp2p/validator.rs index 1e2bb8d096..c85e52688e 100644 --- a/crates/examples/libp2p/validator.rs +++ b/crates/examples/libp2p/validator.rs @@ -6,8 +6,8 @@ //! A validator using libp2p -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use local_ip_address::local_ip; @@ -22,12 +22,12 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let mut args = ValidatorArgs::parse(); // If we did not set the advertise address, use our local IP and port 8000 diff --git a/crates/examples/orchestrator.rs b/crates/examples/orchestrator.rs index f78c0b35b1..3bb419b980 100644 --- a/crates/examples/orchestrator.rs +++ b/crates/examples/orchestrator.rs @@ -6,7 +6,7 @@ //! 
A orchestrator -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use hotshot::helpers::initialize_logging; use hotshot_example_types::state_types::TestTypes; use tracing::instrument; @@ -16,12 +16,12 @@ use crate::infra::{read_orchestrator_init_config, run_orchestrator, Orchestrator #[path = "./infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let (config, orchestrator_url) = read_orchestrator_init_config::(); run_orchestrator::(OrchestratorArgs:: { url: orchestrator_url.clone(), diff --git a/crates/examples/push-cdn/README.md b/crates/examples/push-cdn/README.md index c49f7dedb2..c460beb89a 100644 --- a/crates/examples/push-cdn/README.md +++ b/crates/examples/push-cdn/README.md @@ -28,27 +28,27 @@ Examples: **Run Locally** -`just async_std example all-push-cdn -- --config_file ./crates/orchestrator/run-config.toml` +`just example all-push-cdn -- --config_file ./crates/orchestrator/run-config.toml` OR ``` docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb -just async_std example cdn-marshal -- -d redis://localhost:6379 -b 9000 -just async_std example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 -just async_std example orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 -just async_std example multi-validator-push-cdn -- 10 http://127.0.0.1:4444 +just example cdn-marshal -- -d redis://localhost:6379 -b 9000 +just example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 
--private-advertise-endpoint local_ip:1741 +just example orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 +just example multi-validator-push-cdn -- 10 http://127.0.0.1:4444 ``` **Run with GPU-VID** ``` docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb -just async_std example cdn-marshal -- -d redis://localhost:6379 -b 9000 -just async_std example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 -just async_std example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 -just async_std example_gpuvid_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 +just example cdn-marshal -- -d redis://localhost:6379 -b 9000 +just example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 +just example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 +just example_gpuvid_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 sleep 1m -just async_std example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 +just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 ``` Where ones using `example_gpuvid_leader` could be the leader and should be running on a nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port. 
@@ -57,12 +57,12 @@ Where ones using `example_gpuvid_leader` could be the leader and should be runni If you don't have a gpu but want to test out fixed leader, you can run: ``` docker run --rm -p 0.0.0.0:6379:6379 eqalpha/keydb -just async_std example cdn-marshal -- -d redis://localhost:6379 -b 9000 -just async_std example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 -just async_std example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 -just async_std example_fixed_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 +just example cdn-marshal -- -d redis://localhost:6379 -b 9000 +just example cdn-broker -- -d redis://localhost:6379 --public-bind-endpoint 0.0.0.0:1740 --public-advertise-endpoint local_ip:1740 --private-bind-endpoint 0.0.0.0:1741 --private-advertise-endpoint local_ip:1741 +just example_fixed_leader orchestrator -- --config_file ./crates/orchestrator/run-config.toml --orchestrator_url http://0.0.0.0:4444 --fixed_leader_for_gpuvid 1 +just example_fixed_leader multi-validator-push-cdn -- 1 http://127.0.0.1:4444 sleep 1m -just async_std example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 +just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 ``` Remember, you have to run leaders first, then other validators, so that leaders will have lower index. 
\ No newline at end of file diff --git a/crates/examples/push-cdn/all.rs b/crates/examples/push-cdn/all.rs index 4312f32d12..76c97c0474 100644 --- a/crates/examples/push-cdn/all.rs +++ b/crates/examples/push-cdn/all.rs @@ -10,13 +10,13 @@ pub mod types; use std::path::Path; -use async_compatibility_layer::art::async_spawn; use cdn_broker::{ reexports::{crypto::signature::KeyPair, def::hook::NoMessageHook}, Broker, }; use cdn_marshal::Marshal; use hotshot::{ + helpers::initialize_logging, traits::implementations::{HotShotMessageHook, TestingDef, WrappedSignatureKey}, types::SignatureKey, }; @@ -25,6 +25,7 @@ use hotshot_orchestrator::client::ValidatorArgs; use hotshot_types::traits::node_implementation::NodeType; use infra::{gen_local_address, BUILDER_BASE_PORT}; use rand::{rngs::StdRng, RngCore, SeedableRng}; +use tokio::spawn; use crate::{ infra::{read_orchestrator_init_config, run_orchestrator, OrchestratorArgs}, @@ -37,18 +38,16 @@ pub mod infra; use tracing::error; -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] async fn main() { - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); // use configfile args let (config, orchestrator_url) = read_orchestrator_init_config::(); // Start the orhcestrator - async_spawn(run_orchestrator::(OrchestratorArgs { + spawn(run_orchestrator::(OrchestratorArgs { url: orchestrator_url.clone(), config: config.clone(), })); @@ -104,7 +103,7 @@ async fn main() { }; // Create and spawn the broker - async_spawn(async move { + spawn(async move { let broker: Broker::SignatureKey>> = Broker::new(config).await.expect("broker failed to start"); @@ -132,7 +131,7 @@ async fn main() { }; // Spawn the marshal - async_spawn(async move { + spawn(async move { let marshal: Marshal::SignatureKey>> = Marshal::new(marshal_config) .await @@ -149,7 +148,7 
@@ async fn main() { for i in 0..(config.config.num_nodes_with_stake.get()) { let orchestrator_url = orchestrator_url.clone(); let builder_address = gen_local_address::(i); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs { url: orchestrator_url, diff --git a/crates/examples/push-cdn/broker.rs b/crates/examples/push-cdn/broker.rs index 6e58ae7ca8..0665fb9b61 100644 --- a/crates/examples/push-cdn/broker.rs +++ b/crates/examples/push-cdn/broker.rs @@ -70,8 +70,8 @@ struct Args { #[arg(long, default_value_t = 1_073_741_824)] global_memory_pool_size: usize, } -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] + +#[tokio::main] async fn main() -> Result<()> { // Parse command line arguments let args = Args::parse(); diff --git a/crates/examples/push-cdn/marshal.rs b/crates/examples/push-cdn/marshal.rs index 39d2267bd8..569cb0dc33 100644 --- a/crates/examples/push-cdn/marshal.rs +++ b/crates/examples/push-cdn/marshal.rs @@ -52,8 +52,7 @@ struct Args { global_memory_pool_size: usize, } -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] async fn main() -> Result<()> { // Parse command-line arguments let args = Args::parse(); diff --git a/crates/examples/push-cdn/multi-validator.rs b/crates/examples/push-cdn/multi-validator.rs index b8070f8b1c..54718468b3 100644 --- a/crates/examples/push-cdn/multi-validator.rs +++ b/crates/examples/push-cdn/multi-validator.rs @@ -5,13 +5,11 @@ // along with the HotShot repository. If not, see . //! 
A multi validator -use async_compatibility_layer::{ - art::async_spawn, - logging::{setup_backtrace, setup_logging}, -}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::{MultiValidatorArgs, ValidatorArgs}; +use tokio::spawn; use tracing::instrument; use crate::types::{Network, NodeImpl, ThisRun}; @@ -23,19 +21,19 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let args = MultiValidatorArgs::parse(); tracing::debug!("connecting to orchestrator at {:?}", args.url); let mut nodes = Vec::new(); for node_index in 0..args.num_nodes { let args = args.clone(); - let node = async_spawn(async move { + let node = spawn(async move { infra::main_entry_point::( ValidatorArgs::from_multi_args(args, node_index), ) diff --git a/crates/examples/push-cdn/validator.rs b/crates/examples/push-cdn/validator.rs index 70d53cdc33..7b546dfabe 100644 --- a/crates/examples/push-cdn/validator.rs +++ b/crates/examples/push-cdn/validator.rs @@ -5,8 +5,8 @@ // along with the HotShot repository. If not, see . //! 
A validator -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; +use hotshot::helpers::initialize_logging; use hotshot_example_types::{node_types::TestVersions, state_types::TestTypes}; use hotshot_orchestrator::client::ValidatorArgs; use tracing::{debug, instrument}; @@ -20,12 +20,12 @@ pub mod types; #[path = "../infra/mod.rs"] pub mod infra; -#[cfg_attr(async_executor_impl = "tokio", tokio::main(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] #[instrument] async fn main() { - setup_logging(); - setup_backtrace(); + // Initialize logging + initialize_logging(); + let args = ValidatorArgs::parse(); debug!("connecting to orchestrator at {:?}", args.url); infra::main_entry_point::(args).await; diff --git a/crates/examples/push-cdn/whitelist-adapter.rs b/crates/examples/push-cdn/whitelist-adapter.rs index f787f271e4..e855a41aba 100644 --- a/crates/examples/push-cdn/whitelist-adapter.rs +++ b/crates/examples/push-cdn/whitelist-adapter.rs @@ -40,8 +40,7 @@ struct Args { local_discovery: bool, } -#[cfg_attr(async_executor_impl = "tokio", tokio::main)] -#[cfg_attr(async_executor_impl = "async-std", async_std::main)] +#[tokio::main] async fn main() -> Result<()> { // Parse the command line arguments let args = Args::parse(); diff --git a/crates/fakeapi/Cargo.toml b/crates/fakeapi/Cargo.toml index 315af7f949..b2bd022c45 100644 --- a/crates/fakeapi/Cargo.toml +++ b/crates/fakeapi/Cargo.toml @@ -11,8 +11,8 @@ repository.workspace = true [dependencies] toml = { workspace = true } tide-disco = { workspace = true } +tokio = { workspace = true } anyhow = { workspace = true } -async-compatibility-layer = { workspace = true } hotshot-types = { path = "../types" } vbs = { workspace = true } serde = { workspace = true } diff --git a/crates/fakeapi/src/fake_solver.rs b/crates/fakeapi/src/fake_solver.rs index 3c4beabd03..b52418cc9b 100644 --- a/crates/fakeapi/src/fake_solver.rs +++ 
b/crates/fakeapi/src/fake_solver.rs @@ -4,7 +4,6 @@ use std::{ }; use anyhow::Result; -use async_compatibility_layer::art::async_sleep; use async_lock::RwLock; use futures::FutureExt; use hotshot_example_types::auction_results_provider_types::TestAuctionResult; @@ -95,7 +94,7 @@ impl FakeSolverState { } FakeSolverFaultType::TimeoutFault => { // Sleep for the preconfigured 1 second timeout interval - async_sleep(SOLVER_MAX_TIMEOUT_S).await; + tokio::time::sleep(SOLVER_MAX_TIMEOUT_S).await; } } } diff --git a/crates/hotshot/Cargo.toml b/crates/hotshot/Cargo.toml index 1603f3f180..b842a404a5 100644 --- a/crates/hotshot/Cargo.toml +++ b/crates/hotshot/Cargo.toml @@ -24,7 +24,6 @@ hotshot-testing = [] [dependencies] anyhow = { workspace = true } async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bimap = "0.6" @@ -51,6 +50,7 @@ thiserror = { workspace = true } surf-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } +tracing-subscriber = { workspace = true } vbs = { workspace = true } jf-signature.workspace = true blake3.workspace = true @@ -61,27 +61,12 @@ parking_lot = "0.12" twox-hash = { version = "1", default-features = false } utils = { path = "../utils" } gcr = "0.1" -tracing-subscriber = "0.3" -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } cdn-client = { workspace = true } cdn-broker = { workspace = true, features = ["global-permits"] } cdn-marshal = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } -cdn-client = { workspace = true, features = ["runtime-async-std"] } -cdn-broker = { workspace = true, features = [ - "runtime-async-std", - "global-permits", -] } -cdn-marshal = { workspace = true, features = [ - "runtime-async-std", - "global-permits", -] } - - [dev-dependencies] blake3 = { workspace = true 
} clap.workspace = true diff --git a/crates/hotshot/src/helpers.rs b/crates/hotshot/src/helpers.rs new file mode 100644 index 0000000000..e685e26cdd --- /dev/null +++ b/crates/hotshot/src/helpers.rs @@ -0,0 +1,35 @@ +use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter}; + +/// Initializes logging +pub fn initialize_logging() { + // Parse the `RUST_LOG_SPAN_EVENTS` environment variable + let span_event_filter = match std::env::var("RUST_LOG_SPAN_EVENTS") { + Ok(val) => val + .split(',') + .map(|s| match s.trim() { + "new" => FmtSpan::NEW, + "enter" => FmtSpan::ENTER, + "exit" => FmtSpan::EXIT, + "close" => FmtSpan::CLOSE, + "active" => FmtSpan::ACTIVE, + "full" => FmtSpan::FULL, + _ => FmtSpan::NONE, + }) + .fold(FmtSpan::NONE, |acc, x| acc | x), + Err(_) => FmtSpan::NONE, + }; + + // Conditionally initialize in `json` mode + if std::env::var("RUST_LOG_FORMAT") == Ok("json".to_string()) { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .with_span_events(span_event_filter) + .json() + .try_init(); + } else { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .with_span_events(span_event_filter) + .try_init(); + }; +} diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index af7e210da2..05ccea0a0c 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -26,6 +26,9 @@ pub mod types; pub mod tasks; +/// Contains helper functions for the crate +pub mod helpers; + use std::{ collections::{BTreeMap, HashMap}, num::NonZeroUsize, @@ -34,7 +37,6 @@ use std::{ }; use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use futures::join; @@ -65,6 +67,7 @@ use hotshot_types::{ // External /// Reexport rand crate pub use rand; +use tokio::{spawn, time::sleep}; use tracing::{debug, instrument, trace}; use crate::{ @@ -125,6 +128,9 @@ pub 
struct SystemContext, V: Versi /// The view to enter when first starting consensus start_view: TYPES::View, + /// The epoch to enter when first starting consensus + start_epoch: TYPES::Epoch, + /// Access to the output event stream. output_event_stream: (Sender>, InactiveReceiver>), @@ -168,6 +174,7 @@ impl, V: Versions> Clone consensus: self.consensus.clone(), instance_state: Arc::clone(&self.instance_state), start_view: self.start_view, + start_epoch: self.start_epoch, output_event_stream: self.output_event_stream.clone(), external_event_stream: self.external_event_stream.clone(), anchored_leaf: self.anchored_leaf.clone(), @@ -325,6 +332,7 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext(&mut handle).await; @@ -705,7 +700,7 @@ where let (network_task_sender, mut receiver_from_network): Channel> = broadcast(EVENT_CHANNEL_SIZE); - let _recv_loop_handle = async_spawn(async move { + let _recv_loop_handle = spawn(async move { loop { let msg = match select(left_receiver.recv(), right_receiver.recv()).await { Either::Left(msg) => Either::Left(msg.0.unwrap().as_ref().clone()), @@ -721,7 +716,7 @@ where } }); - let _send_loop_handle = async_spawn(async move { + let _send_loop_handle = spawn(async move { loop { if let Ok(msg) = receiver_from_network.recv().await { let mut state = send_state.write().await; @@ -765,6 +760,7 @@ where SystemContextHandle, SystemContextHandle, ) { + let epoch_height = config.epoch_height; let left_system_context = SystemContext::new( public_key.clone(), private_key.clone(), @@ -832,6 +828,7 @@ where storage: Arc::clone(&left_system_context.storage), network: Arc::clone(&left_system_context.network), memberships: Arc::clone(&left_system_context.memberships), + epoch_height, }; let mut right_handle = SystemContextHandle { @@ -843,6 +840,7 @@ where storage: 
Arc::clone(&right_system_context.storage), network: Arc::clone(&right_system_context.network), memberships: Arc::clone(&right_system_context.memberships), + epoch_height, }; // add consensus tasks to each handle, using their individual internal event streams @@ -977,6 +975,8 @@ pub struct HotShotInitializer { /// Starting view number that should be equivelant to the view the node shut down with last. start_view: TYPES::View, + /// Starting epoch number that should be equivelant to the epoch the node shut down with last. + start_epoch: TYPES::Epoch, /// The view we last performed an action in. An action is Proposing or voting for /// Either the quorum or DA. actioned_view: TYPES::View, @@ -1010,6 +1010,7 @@ impl HotShotInitializer { validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), start_view: TYPES::View::new(0), + start_epoch: TYPES::Epoch::new(0), actioned_view: TYPES::View::new(0), saved_proposals: BTreeMap::new(), high_qc, @@ -1024,15 +1025,16 @@ impl HotShotInitializer { /// /// # Arguments /// * `start_view` - The minimum view number that we are confident won't lead to a double vote - /// after restart. + /// after restart. /// * `validated_state` - Optional validated state that if given, will be used to construct the - /// `SystemContext`. + /// `SystemContext`. 
#[allow(clippy::too_many_arguments)] pub fn from_reload( anchor_leaf: Leaf, instance_state: TYPES::InstanceState, validated_state: Option>, start_view: TYPES::View, + start_epoch: TYPES::Epoch, actioned_view: TYPES::View, saved_proposals: BTreeMap>>, high_qc: QuorumCertificate, @@ -1046,6 +1048,7 @@ impl HotShotInitializer { validated_state, state_delta: None, start_view, + start_epoch, actioned_view, saved_proposals, high_qc, diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index cca95d3c76..6ddd00b252 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -8,10 +8,14 @@ /// Provides trait to create task states from a `SystemContextHandle` pub mod task_state; -use std::{fmt::Debug, sync::Arc, time::Duration}; +use std::{collections::BTreeMap, fmt::Debug, num::NonZeroUsize, sync::Arc, time::Duration}; +use crate::{ + tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, + ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, + MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, SystemContext, Versions, +}; use async_broadcast::{broadcast, RecvError}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use futures::{ @@ -33,7 +37,7 @@ use hotshot_task_impls::{ view_sync::ViewSyncTaskState, }; use hotshot_types::{ - consensus::Consensus, + consensus::{Consensus, OuterConsensus}, constants::EVENT_CHANNEL_SIZE, message::{Message, UpgradeLock}, traits::{ @@ -41,14 +45,9 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; +use tokio::{spawn, time::sleep}; use vbs::version::StaticVersionType; -use crate::{ - tasks::task_state::CreateTaskState, types::SystemContextHandle, ConsensusApi, - ConsensusMetricsValue, ConsensusTaskRegistry, HotShotConfig, HotShotInitializer, - MarketplaceConfig, Memberships, NetworkTaskRegistry, SignatureKey, 
SystemContext, Versions, -}; - /// event for global event stream #[derive(Clone, Debug)] pub enum GlobalEvent { @@ -101,14 +100,14 @@ pub fn add_queue_len_task, V: Vers let consensus = handle.hotshot.consensus(); let rx = handle.internal_event_stream.1.clone(); let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); - let task_handle = async_spawn(async move { + let task_handle = spawn(async move { futures::pin_mut!(shutdown_signal); loop { futures::select! { () = shutdown_signal => { return; }, - () = async_sleep(Duration::from_millis(500)).fuse() => { + () = sleep(Duration::from_millis(500)).fuse() => { consensus.read().await.metrics.internal_event_queue_len.set(rx.len()); } } @@ -118,6 +117,7 @@ pub fn add_queue_len_task, V: Vers } /// Add the network task to handle messages and publish events. +#[allow(clippy::missing_panics_doc)] pub fn add_network_message_task< TYPES: NodeType, I: NodeImplementation, @@ -131,6 +131,7 @@ pub fn add_network_message_task< internal_event_stream: handle.internal_event_stream.0.clone(), external_event_stream: handle.output_event_stream.0.clone(), public_key: handle.public_key().clone(), + transactions_cache: lru::LruCache::new(NonZeroUsize::new(100_000).unwrap()), }; let upgrade_lock = handle.hotshot.upgrade_lock.clone(); @@ -138,7 +139,7 @@ pub fn add_network_message_task< let network = Arc::clone(channel); let mut state = network_state.clone(); let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); - let task_handle = async_spawn(async move { + let task_handle = spawn(async move { futures::pin_mut!(shutdown_signal); loop { @@ -198,8 +199,9 @@ pub fn add_network_event_task< quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), - consensus: Arc::clone(&handle.consensus()), + consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), + transmit_tasks: BTreeMap::new(), }; let task = Task::new( network_state, @@ -327,6 +329,7 @@ where storage: 
I::Storage, marketplace_config: MarketplaceConfig, ) -> SystemContextHandle { + let epoch_height = config.epoch_height; let hotshot = SystemContext::new( public_key, private_key, @@ -355,6 +358,7 @@ where storage: Arc::clone(&hotshot.storage), network: Arc::clone(&hotshot.network), memberships: Arc::clone(&hotshot.memberships), + epoch_height, }; add_consensus_tasks::(&mut handle).await; @@ -408,7 +412,7 @@ where let private_key = handle.private_key().clone(); let upgrade_lock = handle.hotshot.upgrade_lock.clone(); let consensus = Arc::clone(&handle.hotshot.consensus()); - let send_handle = async_spawn(async move { + let send_handle = spawn(async move { futures::pin_mut!(shutdown_signal); let recv_stream = stream::unfold(original_receiver, |mut recv| async move { @@ -462,7 +466,7 @@ where // spawn a task to listen on the newly created event stream, // and broadcast the transformed events to the original internal event stream let shutdown_signal = create_shutdown_event_monitor(handle).fuse(); - let recv_handle = async_spawn(async move { + let recv_handle = spawn(async move { futures::pin_mut!(shutdown_signal); let network_recv_stream = diff --git a/crates/hotshot/src/tasks/task_state.rs b/crates/hotshot/src/tasks/task_state.rs index ead7b745f6..09bc3b0eb4 100644 --- a/crates/hotshot/src/tasks/task_state.rs +++ b/crates/hotshot/src/tasks/task_state.rs @@ -9,7 +9,6 @@ use std::{ sync::{atomic::AtomicBool, Arc}, }; -use async_compatibility_layer::art::async_spawn; use async_trait::async_trait; use chrono::Utc; use hotshot_task_impls::{ @@ -26,6 +25,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeImplementation, NodeType}, }, }; +use tokio::spawn; use crate::{types::SystemContextHandle, Versions}; @@ -241,6 +241,7 @@ impl, V: Versions> CreateTaskState id: handle.hotshot.id, storage: Arc::clone(&handle.storage), upgrade_lock: handle.hotshot.upgrade_lock.clone(), + epoch_height: handle.hotshot.config.epoch_height, } } } @@ -268,6 +269,7 @@ impl, V: 
Versions> CreateTaskState id: handle.hotshot.id, formed_upgrade_certificate: None, upgrade_lock: handle.hotshot.upgrade_lock.clone(), + epoch_height: handle.hotshot.config.epoch_height, } } } @@ -284,20 +286,15 @@ impl, V: Versions> CreateTaskState private_key: handle.private_key().clone(), consensus: OuterConsensus::new(consensus), cur_view: handle.cur_view().await, - cur_view_time: Utc::now().timestamp(), cur_epoch: handle.cur_epoch().await, - network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), - timeout_task: async_spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, output_event_stream: handle.hotshot.external_event_stream.0.clone(), storage: Arc::clone(&handle.storage), - proposal_cert: None, spawned_tasks: BTreeMap::new(), - instance_state: handle.hotshot.instance_state(), id: handle.hotshot.id, upgrade_lock: handle.hotshot.upgrade_lock.clone(), + epoch_height: handle.hotshot.config.epoch_height, } } } @@ -324,12 +321,12 @@ impl, V: Versions> CreateTaskState cur_view_time: Utc::now().timestamp(), cur_epoch: handle.cur_epoch().await, output_event_stream: handle.hotshot.external_event_stream.0.clone(), - timeout_task: async_spawn(async {}), + timeout_task: spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, consensus: OuterConsensus::new(consensus), - last_decided_view: handle.cur_view().await, id: handle.hotshot.id, upgrade_lock: handle.hotshot.upgrade_lock.clone(), + epoch_height: handle.hotshot.config.epoch_height, } } } diff --git a/crates/hotshot/src/traits/networking/combined_network.rs b/crates/hotshot/src/traits/networking/combined_network.rs index 584e4e888e..6c891fa6f7 100644 --- a/crates/hotshot/src/traits/networking/combined_network.rs +++ b/crates/hotshot/src/traits/networking/combined_network.rs @@ -19,10 +19,6 @@ use std::{ }; use async_broadcast::{broadcast, 
InactiveReceiver, Sender}; -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::TrySendError, -}; use async_lock::RwLock; use async_trait::async_trait; use futures::{join, select, FutureExt}; @@ -45,6 +41,7 @@ use hotshot_types::{ }; use lru::LruCache; use parking_lot::RwLock as PlRwLock; +use tokio::{spawn, sync::mpsc::error::TrySendError, time::sleep}; use tracing::{debug, info, warn}; use super::{push_cdn_network::PushCdnNetwork, NetworkError}; @@ -95,7 +92,7 @@ impl CombinedNetworks { #[must_use] pub fn new( primary_network: PushCdnNetwork, - secondary_network: Libp2pNetwork, + secondary_network: Libp2pNetwork, delay_duration: Option, ) -> Self { // Create networks from the ones passed in @@ -127,7 +124,7 @@ impl CombinedNetworks { /// Get a ref to the backup network #[must_use] - pub fn secondary(&self) -> &Libp2pNetwork { + pub fn secondary(&self) -> &Libp2pNetwork { &self.networks.1 } @@ -183,8 +180,8 @@ impl CombinedNetworks { .1 .activate_cloned(); // Spawn a task that sleeps for `duration` and then sends the message if it wasn't cancelled - async_spawn(async move { - async_sleep(duration).await; + spawn(async move { + sleep(duration).await; if receiver.try_recv().is_ok() { // The task has been cancelled because the view progressed, it means the primary is working fine debug!( @@ -251,7 +248,7 @@ impl CombinedNetworks { #[derive(Clone)] pub struct UnderlyingCombinedNetworks( pub PushCdnNetwork, - pub Libp2pNetwork, + pub Libp2pNetwork, ); #[cfg(feature = "hotshot-testing")] @@ -273,7 +270,7 @@ impl TestableNetworkingImplementation for CombinedNetwor None, Duration::default(), ), - as TestableNetworkingImplementation>::generator( + as TestableNetworkingImplementation>::generator( expected_node_count, num_bootstrap, network_id, @@ -297,7 +294,7 @@ impl TestableNetworkingImplementation for CombinedNetwor // Combine the two let underlying_combined = UnderlyingCombinedNetworks( cdn.clone(), - Arc::>::unwrap_or_clone(p2p), + 
Arc::>::unwrap_or_clone(p2p), ); // We want to use the same message cache between the two networks @@ -462,11 +459,8 @@ impl ConnectedNetwork for CombinedNetworks // Calculate hash of the message let message_hash = calculate_hash_of(&message); - // Check if the hash is in the cache - if !self.message_cache.read().contains(&message_hash) { - // Add the hash to the cache - self.message_cache.write().put(message_hash, ()); - + // Check if the hash is in the cache and update the cache + if self.message_cache.write().put(message_hash, ()).is_none() { break Ok(message); } } @@ -486,7 +480,7 @@ impl ConnectedNetwork for CombinedNetworks T: NodeType + 'a, { let delayed_tasks_channels = Arc::clone(&self.delayed_tasks_channels); - async_spawn(async move { + spawn(async move { let mut map_lock = delayed_tasks_channels.write().await; while let Some((first_view, _)) = map_lock.first_key_value() { // Broadcast a cancelling signal to all the tasks related to each view older than the new one diff --git a/crates/hotshot/src/traits/networking/libp2p_network.rs b/crates/hotshot/src/traits/networking/libp2p_network.rs index 6567450243..0713429e66 100644 --- a/crates/hotshot/src/traits/networking/libp2p_network.rs +++ b/crates/hotshot/src/traits/networking/libp2p_network.rs @@ -23,20 +23,10 @@ use std::{ }; use anyhow::{anyhow, Context}; -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::{ - self, bounded, unbounded, Receiver as BoundedReceiver, Sender as BoundedSender, - TrySendError, UnboundedReceiver, UnboundedSender, - }, -}; use async_lock::RwLock; use async_trait::async_trait; use bimap::BiHashMap; -use futures::{ - future::{join_all, Either}, - FutureExt, -}; +use futures::future::join_all; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{ AsyncGenerator, NetworkReliability, TestableNetworkingImplementation, @@ -51,7 +41,7 @@ use hotshot_types::{ metrics::{Counter, Gauge, Metrics, NoMetrics}, network::{ConnectedNetwork, 
NetworkError, Topic}, node_implementation::{ConsensusTime, NodeType}, - signature_key::SignatureKey, + signature_key::{PrivateSignatureKey, SignatureKey}, }, BoxSyncFuture, }; @@ -73,6 +63,14 @@ use libp2p_networking::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; +use tokio::{ + select, spawn, + sync::{ + mpsc::{channel, error::TrySendError, Receiver, Sender}, + Mutex, + }, + time::sleep, +}; use tracing::{error, info, instrument, trace, warn}; use crate::BroadcastDelay; @@ -132,7 +130,7 @@ pub struct Empty { byte: u8, } -impl Debug for Libp2pNetwork { +impl Debug for Libp2pNetwork { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Libp2p").field("inner", &"inner").finish() } @@ -143,17 +141,17 @@ pub type PeerInfoVec = Arc>>; /// The underlying state of the libp2p network #[derive(Debug)] -struct Libp2pNetworkInner { +struct Libp2pNetworkInner { /// this node's public key - pk: K, + pk: T::SignatureKey, /// handle to control the network - handle: Arc>, + handle: Arc>, /// Message Receiver - receiver: UnboundedReceiver>, + receiver: Mutex>>, /// Sender for broadcast messages - sender: UnboundedSender>, + sender: Sender>, /// Sender for node lookup (relevant view number, key of node) (None for shutdown) - node_lookup_send: BoundedSender>, + node_lookup_send: Sender>, /// this is really cheating to enable local tests /// hashset of (bootstrap_addr, peer_id) bootstrap_addrs: PeerInfoVec, @@ -175,21 +173,19 @@ struct Libp2pNetworkInner { /// reliability_config reliability_config: Option>, /// Killswitch sender - kill_switch: channel::Sender<()>, + kill_switch: Sender<()>, } /// Networking implementation that uses libp2p /// generic over `M` which is the message type #[derive(Clone)] -pub struct Libp2pNetwork { +pub struct Libp2pNetwork { /// holds the state of the libp2p network - inner: Arc>, + inner: Arc>, } #[cfg(feature = "hotshot-testing")] -impl TestableNetworkingImplementation - for 
Libp2pNetwork -{ +impl TestableNetworkingImplementation for Libp2pNetwork { /// Returns a boxed function `f(node_id, public_key) -> Libp2pNetwork` /// with the purpose of generating libp2p networks. /// Generates `num_bootstrap` bootstrap nodes. The remainder of nodes are normal @@ -232,12 +228,11 @@ impl TestableNetworkingImplementation Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{port}/quic-v1")).unwrap(); // We assign node's public key and stake value rather than read from config file since it's a test - let privkey = - TYPES::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; - let pubkey = TYPES::SignatureKey::from_private(&privkey); + let privkey = T::SignatureKey::generated_from_seed_indexed([0u8; 32], node_id).1; + let pubkey = T::SignatureKey::from_private(&privkey); // Derive the Libp2p keypair from the private key - let libp2p_keypair = derive_libp2p_keypair::(&privkey) + let libp2p_keypair = derive_libp2p_keypair::(&privkey) .expect("Failed to derive libp2p keypair"); // Sign the lookup record @@ -313,7 +308,7 @@ pub fn derive_libp2p_keypair( private_key: &K::PrivateKey, ) -> anyhow::Result { // Derive a secondary key from our primary private key - let derived_key = blake3::derive_key("libp2p key", &(bincode::serialize(&private_key)?)); + let derived_key = blake3::derive_key("libp2p key", &private_key.to_bytes()); let derived_key = SecretKey::try_from_bytes(derived_key)?; // Create an `ed25519` keypair from the derived key @@ -383,7 +378,7 @@ pub fn derive_libp2p_multiaddr(addr: &String) -> anyhow::Result { }) } -impl Libp2pNetwork { +impl Libp2pNetwork { /// Create and return a Libp2p network from a network config file /// and various other configuration-specific values. 
/// @@ -392,13 +387,15 @@ impl Libp2pNetwork { /// /// # Panics /// If we are unable to calculate the replication factor + #[allow(clippy::too_many_arguments)] pub async fn from_config( - mut config: NetworkConfig, + mut config: NetworkConfig, + quorum_membership: T::Membership, gossip_config: GossipConfig, request_response_config: RequestResponseConfig, bind_address: Multiaddr, - pub_key: &K, - priv_key: &K::PrivateKey, + pub_key: &T::SignatureKey, + priv_key: &::PrivateKey, metrics: Libp2pMetricsValue, ) -> anyhow::Result { // Try to take our Libp2p config from our broader network config @@ -408,7 +405,7 @@ impl Libp2pNetwork { .ok_or(anyhow!("Libp2p config not supplied"))?; // Derive our Libp2p keypair from our supplied private key - let keypair = derive_libp2p_keypair::(priv_key)?; + let keypair = derive_libp2p_keypair::(priv_key)?; // Build our libp2p configuration let mut config_builder = NetworkNodeConfigBuilder::default(); @@ -417,21 +414,14 @@ impl Libp2pNetwork { config_builder.gossip_config(gossip_config.clone()); config_builder.request_response_config(request_response_config); - // Extrapolate the stake table from the known nodes - let stake_table: HashSet = config - .config - .known_nodes_with_stake - .iter() - .map(|node| K::public_key(&node.stake_table_entry)) - .collect(); - + // Construct the auth message let auth_message = construct_auth_message(pub_key, &keypair.public().to_peer_id(), priv_key) .with_context(|| "Failed to construct auth message")?; // Set the auth message and stake table config_builder - .stake_table(Some(stake_table)) + .stake_table(Some(quorum_membership)) .auth_message(Some(auth_message)); // The replication factor is the minimum of [the default and 2/3 the number of nodes] @@ -474,7 +464,7 @@ impl Libp2pNetwork { // Insert all known nodes into the set of all keys for node in config.config.known_nodes_with_stake { - all_keys.insert(K::public_key(&node.stake_table_entry)); + 
all_keys.insert(T::SignatureKey::public_key(&node.stake_table_entry)); } Ok(Libp2pNetwork::new( @@ -502,7 +492,7 @@ impl Libp2pNetwork { if self.is_ready() { break; } - async_sleep(Duration::from_secs(1)).await; + sleep(Duration::from_secs(1)).await; } } @@ -521,14 +511,14 @@ impl Libp2pNetwork { #[allow(clippy::too_many_arguments)] pub async fn new( metrics: Libp2pMetricsValue, - config: NetworkNodeConfig, - pk: K, - lookup_record_value: RecordValue, + config: NetworkNodeConfig, + pk: T::SignatureKey, + lookup_record_value: RecordValue, bootstrap_addrs: BootstrapAddrs, id: usize, #[cfg(feature = "hotshot-testing")] reliability_config: Option>, - ) -> Result, NetworkError> { - let (mut rx, network_handle) = spawn_network_node::(config.clone(), id) + ) -> Result, NetworkError> { + let (mut rx, network_handle) = spawn_network_node::(config.clone(), id) .await .map_err(|e| NetworkError::ConfigError(format!("failed to spawn network node: {e}")))?; @@ -545,15 +535,15 @@ impl Libp2pNetwork { // unbounded channels may not be the best choice (spammed?) 
// if bounded figure out a way to log dropped msgs - let (sender, receiver) = unbounded(); - let (node_lookup_send, node_lookup_recv) = bounded(10); - let (kill_tx, kill_rx) = bounded(1); + let (sender, receiver) = channel(1000); + let (node_lookup_send, node_lookup_recv) = channel(10); + let (kill_tx, kill_rx) = channel(1); rx.set_kill_switch(kill_rx); let mut result = Libp2pNetwork { inner: Arc::new(Libp2pNetworkInner { handle: Arc::new(network_handle), - receiver, + receiver: Mutex::new(receiver), sender: sender.clone(), pk, bootstrap_addrs, @@ -586,15 +576,18 @@ impl Libp2pNetwork { /// Spawns task for looking up nodes pre-emptively #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] - fn spawn_node_lookup(&self, mut node_lookup_recv: BoundedReceiver>) { + fn spawn_node_lookup( + &self, + mut node_lookup_recv: Receiver>, + ) { let handle = Arc::clone(&self.inner.handle); let dht_timeout = self.inner.dht_timeout; let latest_seen_view = Arc::clone(&self.inner.latest_seen_view); // deals with handling lookup queue. 
should be infallible - async_spawn(async move { + spawn(async move { // cancels on shutdown - while let Ok(Some((view_number, pk))) = node_lookup_recv.recv().await { + while let Some(Some((view_number, pk))) = node_lookup_recv.recv().await { /// defines lookahead threshold based on the constant #[allow(clippy::cast_possible_truncation)] const THRESHOLD: u64 = (LOOK_AHEAD as f64 * 0.8) as u64; @@ -613,26 +606,26 @@ impl Libp2pNetwork { } /// Initiates connection to the outside world - fn spawn_connect(&mut self, id: usize, lookup_record_value: RecordValue) { + fn spawn_connect(&mut self, id: usize, lookup_record_value: RecordValue) { let pk = self.inner.pk.clone(); let bootstrap_ref = Arc::clone(&self.inner.bootstrap_addrs); let handle = Arc::clone(&self.inner.handle); let is_bootstrapped = Arc::clone(&self.inner.is_bootstrapped); let inner = Arc::clone(&self.inner); - async_spawn({ + spawn({ let is_ready = Arc::clone(&self.inner.is_ready); async move { let bs_addrs = bootstrap_ref.read().await.clone(); // Add known peers to the network - handle.add_known_peers(bs_addrs).await.unwrap(); + handle.add_known_peers(bs_addrs).unwrap(); // Begin the bootstrap process - handle.begin_bootstrap().await?; + handle.begin_bootstrap()?; while !is_bootstrapped.load(Ordering::Relaxed) { - async_sleep(Duration::from_secs(1)).await; - handle.begin_bootstrap().await?; + sleep(Duration::from_secs(1)).await; + handle.begin_bootstrap()?; } // Subscribe to the QC topic @@ -648,7 +641,7 @@ impl Libp2pNetwork { .await .is_err() { - async_sleep(Duration::from_secs(1)).await; + sleep(Duration::from_secs(1)).await; } // Wait for the network to connect to the required number of peers @@ -668,19 +661,19 @@ impl Libp2pNetwork { } /// Handle events - async fn handle_recvd_events( + fn handle_recvd_events( &self, msg: NetworkEvent, - sender: &UnboundedSender>, + sender: &Sender>, ) -> Result<(), NetworkError> { match msg { GossipMsg(msg) => { - sender.send(msg).await.map_err(|err| { + 
sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!("failed to send gossip message: {err}")) })?; } DirectRequest(msg, _pid, chan) => { - sender.send(msg).await.map_err(|err| { + sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!( "failed to send direct request message: {err}" )) @@ -696,7 +689,6 @@ impl Libp2pNetwork { )) })?, ) - .await .is_err() { error!("failed to ack!"); @@ -713,48 +705,39 @@ impl Libp2pNetwork { /// task to propagate messages to handlers /// terminates on shut down of network - fn handle_event_generator( - &self, - sender: UnboundedSender>, - mut network_rx: NetworkNodeReceiver, - ) { + fn handle_event_generator(&self, sender: Sender>, mut network_rx: NetworkNodeReceiver) { let handle = self.clone(); let is_bootstrapped = Arc::clone(&self.inner.is_bootstrapped); - async_spawn(async move { + spawn(async move { let Some(mut kill_switch) = network_rx.take_kill_switch() else { tracing::error!( "`spawn_handle` was called on a network handle that was already closed" ); return; }; - let mut kill_switch = kill_switch.recv().boxed(); - let mut next_msg = network_rx.recv().boxed(); loop { - let msg_or_killed = futures::future::select(next_msg, kill_switch).await; - match msg_or_killed { - Either::Left((Ok(message), other_stream)) => { - match &message { + select! 
{ + msg = network_rx.recv() => { + let Ok(message) = msg else { + warn!("Network receiver shut down!"); + return; + }; + + match message { NetworkEvent::IsBootstrapped => { is_bootstrapped.store(true, Ordering::Relaxed); } GossipMsg(_) | DirectRequest(_, _, _) | DirectResponse(_, _) => { - let _ = handle.handle_recvd_events(message, &sender).await; + let _ = handle.handle_recvd_events(message, &sender); } NetworkEvent::ConnectedPeersUpdate(num_peers) => { - handle.inner.metrics.num_connected_peers.set(*num_peers); + handle.inner.metrics.num_connected_peers.set(num_peers); } } - // re-set the `kill_switch` for the next loop - kill_switch = other_stream; - // re-set `receiver.recv()` for the next loop - next_msg = network_rx.recv().boxed(); - } - Either::Left((Err(_), _)) => { - warn!("Network receiver shut down!"); - return; } - Either::Right(_) => { + + _kill_switch = kill_switch.recv() => { warn!("Event Handler shutdown"); return; } @@ -765,7 +748,7 @@ impl Libp2pNetwork { } #[async_trait] -impl ConnectedNetwork for Libp2pNetwork { +impl ConnectedNetwork for Libp2pNetwork { #[instrument(name = "Libp2pNetwork::ready_blocking", skip_all)] async fn wait_for_ready(&self) { self.wait_for_ready().await; @@ -810,7 +793,7 @@ impl ConnectedNetwork for Libp2pNetwork { let topic = topic.to_string(); if self.inner.subscribed_topics.contains(&topic) { // Short-circuit-send the message to ourselves - self.inner.sender.send(message.clone()).await.map_err(|_| { + self.inner.sender.try_send(message.clone()).map_err(|_| { self.inner.metrics.num_failed_messages.add(1); NetworkError::ShutDown })?; @@ -830,19 +813,19 @@ impl ConnectedNetwork for Libp2pNetwork { let handle_2 = Arc::clone(&handle); let metrics_2 = metrics.clone(); boxed_sync(async move { - if let Err(e) = handle_2.gossip_no_serialize(topic_2, msg).await { + if let Err(e) = handle_2.gossip_no_serialize(topic_2, msg) { metrics_2.num_failed_messages.add(1); warn!("Failed to broadcast to libp2p: {:?}", e); } }) }), ); - 
async_spawn(fut); + spawn(fut); return Ok(()); } } - if let Err(e) = self.inner.handle.gossip(topic, &message).await { + if let Err(e) = self.inner.handle.gossip(topic, &message) { self.inner.metrics.num_failed_messages.add(1); return Err(e); } @@ -854,7 +837,7 @@ impl ConnectedNetwork for Libp2pNetwork { async fn da_broadcast_message( &self, message: Vec, - recipients: Vec, + recipients: Vec, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { // If we're not ready, return an error @@ -884,7 +867,11 @@ impl ConnectedNetwork for Libp2pNetwork { } #[instrument(name = "Libp2pNetwork::direct_message", skip_all)] - async fn direct_message(&self, message: Vec, recipient: K) -> Result<(), NetworkError> { + async fn direct_message( + &self, + message: Vec, + recipient: T::SignatureKey, + ) -> Result<(), NetworkError> { // If we're not ready, return an error if !self.is_ready() { self.inner.metrics.num_failed_messages.add(1); @@ -894,7 +881,7 @@ impl ConnectedNetwork for Libp2pNetwork { // short circuit if we're dming ourselves if recipient == self.inner.pk { // panic if we already shut down? 
- self.inner.sender.send(message).await.map_err(|_x| { + self.inner.sender.try_send(message).map_err(|_x| { self.inner.metrics.num_failed_messages.add(1); NetworkError::ShutDown })?; @@ -928,19 +915,19 @@ impl ConnectedNetwork for Libp2pNetwork { let handle_2 = Arc::clone(&handle); let metrics_2 = metrics.clone(); boxed_sync(async move { - if let Err(e) = handle_2.direct_request_no_serialize(pid, msg).await { + if let Err(e) = handle_2.direct_request_no_serialize(pid, msg) { metrics_2.num_failed_messages.add(1); warn!("Failed to broadcast to libp2p: {:?}", e); } }) }), ); - async_spawn(fut); + spawn(fut); return Ok(()); } } - match self.inner.handle.direct_request(pid, &message).await { + match self.inner.handle.direct_request(pid, &message) { Ok(()) => Ok(()), Err(e) => { self.inner.metrics.num_failed_messages.add(1); @@ -958,19 +945,22 @@ impl ConnectedNetwork for Libp2pNetwork { let result = self .inner .receiver + .lock() + .await .recv() .await - .map_err(|_x| NetworkError::ShutDown)?; + .ok_or(NetworkError::ShutDown)?; Ok(result) } #[instrument(name = "Libp2pNetwork::queue_node_lookup", skip_all)] + #[allow(clippy::type_complexity)] fn queue_node_lookup( &self, view_number: ViewNumber, - pk: K, - ) -> Result<(), TrySendError>> { + pk: T::SignatureKey, + ) -> Result<(), TrySendError>> { self.inner .node_lookup_send .try_send(Some((view_number, pk))) @@ -990,7 +980,7 @@ impl ConnectedNetwork for Libp2pNetwork { /// use of the future view and leader to queue the lookups. 
async fn update_view<'a, TYPES>(&'a self, view: u64, epoch: u64, membership: &TYPES::Membership) where - TYPES: NodeType + 'a, + TYPES: NodeType + 'a, { let future_view = ::View::new(view) + LOOK_AHEAD; let epoch = ::Epoch::new(epoch); diff --git a/crates/hotshot/src/traits/networking/memory_network.rs b/crates/hotshot/src/traits/networking/memory_network.rs index e86c773e71..16eaa8ccbe 100644 --- a/crates/hotshot/src/traits/networking/memory_network.rs +++ b/crates/hotshot/src/traits/networking/memory_network.rs @@ -18,14 +18,9 @@ use std::{ }, }; -use async_compatibility_layer::{ - art::async_spawn, - channel::{bounded, BoundedStream, Receiver, SendError, Sender}, -}; use async_lock::{Mutex, RwLock}; use async_trait::async_trait; use dashmap::DashMap; -use futures::StreamExt; use hotshot_types::{ boxed_sync, traits::{ @@ -39,6 +34,10 @@ use hotshot_types::{ BoxSyncFuture, }; use rand::Rng; +use tokio::{ + spawn, + sync::mpsc::{channel, error::SendError, Receiver, Sender}, +}; use tracing::{debug, error, info, info_span, instrument, trace, warn, Instrument}; use super::{NetworkError, NetworkReliability}; @@ -119,17 +118,16 @@ impl MemoryNetwork { reliability_config: Option>, ) -> MemoryNetwork { info!("Attaching new MemoryNetwork"); - let (input, task_recv) = bounded(128); - let (task_send, output) = bounded(128); + let (input, mut task_recv) = channel(128); + let (task_send, output) = channel(128); let in_flight_message_count = AtomicUsize::new(0); trace!("Channels open, spawning background task"); - async_spawn( + spawn( async move { debug!("Starting background task"); - let mut task_stream: BoundedStream> = task_recv.into_stream(); trace!("Entering processing loop"); - while let Some(vec) = task_stream.next().await { + while let Some(vec) = task_recv.recv().await { trace!(?vec, "Incoming message"); // Attempt to decode message let ts = task_send.clone(); @@ -282,7 +280,7 @@ impl ConnectedNetwork for MemoryNetwork { }) }), ); - async_spawn(fut); + spawn(fut); } 
} else { let res = node.input(message.clone()).await; @@ -342,7 +340,7 @@ impl ConnectedNetwork for MemoryNetwork { }) }), ); - async_spawn(fut); + spawn(fut); } Ok(()) } else { @@ -377,7 +375,7 @@ impl ConnectedNetwork for MemoryNetwork { .await .recv() .await - .map_err(|_x| NetworkError::ShutDown)?; + .ok_or(NetworkError::ShutDown)?; self.inner .in_flight_message_count .fetch_sub(1, Ordering::Relaxed); diff --git a/crates/hotshot/src/traits/networking/push_cdn_network/mod.rs b/crates/hotshot/src/traits/networking/push_cdn_network/mod.rs index c60a5070a6..f9984db276 100644 --- a/crates/hotshot/src/traits/networking/push_cdn_network/mod.rs +++ b/crates/hotshot/src/traits/networking/push_cdn_network/mod.rs @@ -15,9 +15,6 @@ use std::sync::Arc; #[cfg(feature = "hotshot-testing")] use std::{path::Path, time::Duration}; -use async_compatibility_layer::channel::TrySendError; -#[cfg(feature = "hotshot-testing")] -use async_compatibility_layer::{art::async_sleep, art::async_spawn}; use async_trait::async_trait; use cdn_broker::reexports::def::hook::NoMessageHook; #[cfg(feature = "hotshot-testing")] @@ -50,6 +47,7 @@ use hotshot_types::{ use metrics::CdnMetricsValue; #[cfg(feature = "hotshot-testing")] use rand::{rngs::StdRng, RngCore, SeedableRng}; +use tokio::{spawn, sync::mpsc::error::TrySendError, time::sleep}; use tracing::error; use super::NetworkError; @@ -213,14 +211,14 @@ impl TestableNetworkingImplementation }; // Create and spawn the broker - async_spawn(async move { + spawn(async move { let broker: Broker> = Broker::new(config).await.expect("broker failed to start"); // If we are the first broker by identifier, we need to sleep a bit // for discovery to happen first if other_broker_identifier > broker_identifier { - async_sleep(Duration::from_secs(2)).await; + sleep(Duration::from_secs(2)).await; } // Error if we stopped unexpectedly @@ -246,7 +244,7 @@ impl TestableNetworkingImplementation }; // Spawn the marshal - async_spawn(async move { + spawn(async move 
{ let marshal: Marshal> = Marshal::new(marshal_config) .await .expect("failed to spawn marshal"); @@ -345,11 +343,15 @@ impl ConnectedNetwork for PushCdnNetwork { topic: HotShotTopic, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { + // If we're paused, don't send the message + #[cfg(feature = "hotshot-testing")] + if self.is_paused.load(Ordering::Relaxed) { + return Ok(()); + } self.broadcast_message(message, topic.into()) .await - .map_err(|e| { + .inspect_err(|_e| { self.metrics.num_failed_messages.add(1); - e }) } @@ -364,11 +366,15 @@ impl ConnectedNetwork for PushCdnNetwork { _recipients: Vec, _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { + // If we're paused, don't send the message + #[cfg(feature = "hotshot-testing")] + if self.is_paused.load(Ordering::Relaxed) { + return Ok(()); + } self.broadcast_message(message, Topic::Da) .await - .map_err(|e| { + .inspect_err(|_e| { self.metrics.num_failed_messages.add(1); - e }) } @@ -410,6 +416,7 @@ impl ConnectedNetwork for PushCdnNetwork { // If we're paused, receive but don't process messages #[cfg(feature = "hotshot-testing")] if self.is_paused.load(Ordering::Relaxed) { + sleep(Duration::from_millis(100)).await; return Ok(vec![]); } diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 04c851c2c4..2ab7874b8f 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -70,6 +70,9 @@ pub struct SystemContextHandle, V: /// Memberships used by consensus pub memberships: Arc>, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl + 'static, V: Versions> @@ -222,7 +225,8 @@ impl + 'static, V: Versions> /// there are two cleaner solutions: /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper - /// NOTE: this is only used for sanity checks in our tests + /// + /// NOTE: this is only used for sanity checks in our tests #[must_use] pub 
fn internal_event_stream_receiver_known_impl(&self) -> Receiver>> { self.internal_event_stream.1.activate_cloned() diff --git a/crates/libp2p-networking/Cargo.toml b/crates/libp2p-networking/Cargo.toml index 727ee9146a..4488b35b58 100644 --- a/crates/libp2p-networking/Cargo.toml +++ b/crates/libp2p-networking/Cargo.toml @@ -11,9 +11,11 @@ authors = { workspace = true } default = ["webui"] webui = [] +[dev-dependencies] +hotshot-example-types = { path = "../example-types" } + [dependencies] anyhow = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } blake3 = { workspace = true } @@ -40,14 +42,11 @@ lazy_static = { workspace = true } pin-project = "1" portpicker.workspace = true cbor4ii = "0.3" +tracing-subscriber = { workspace = true } -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] libp2p = { workspace = true, features = ["tokio"] } tokio = { workspace = true } tokio-stream = "0.1" -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -libp2p = { workspace = true, features = ["async-std"] } -async-std = { workspace = true } [lints] workspace = true diff --git a/crates/libp2p-networking/README.md b/crates/libp2p-networking/README.md deleted file mode 100644 index 62a190398b..0000000000 --- a/crates/libp2p-networking/README.md +++ /dev/null @@ -1,111 +0,0 @@ -# USAGE - -Networking library intended for use with HotShot. Builds upon abstractions from libp2p-rs. - -## CLI Demo - -To get very verbose logging: - -```bash -RUST_LOG_OUTPUT=OUTFILE RUST_LOG="trace" cargo run --features=async-std-executor --release -``` - -The idea here is to spin up several nodes in a p2p network. These nodes can share messages with each other. 
- -``` -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- -p 1111" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/1111/quic-v1 -p 2222" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/2222/quic-v1 -p 3333" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/3333/quic-v1 -p 4444" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/4444/quic-v1 -p 5555" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/5555/quic-v1 -p 6666" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/6666/quic-v1 -p 7777" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/7777/quic-v1 -p 8888" -nix develop -c "RUST_LOG_OUTPUT=OUTFILE_0 RUST_LOG=error cargo run --features=async-std-executor --release --example clichat -- /ip4/127.0.0.1/udp/8888/quic-v1 -p 9999" -``` - -At this point the idea is that each node will continue to attempt to connect to nodes -until it hits at least 5 peers. - -Use `Tab` to switch between messages and prompt. Press `Enter` to broadcast a message to all connected nodes. -Press `Right Arrow` to direct-send a message to a randomly selected peer. -Press `q` to quit the program from the messages view. - -## Counter Single Machine Tests - -Each node has its own counter. 
The idea behind these tests is to support "broadcast" messages and "direct" messages to increment each nodes counter. - -`cargo test --features=async-std-executor --release stress` - -spawns off five integration tests. - -- Two that uses gossipsub to broadcast a counter increment from one node to all other nodes -- Two where one node increments its counter, then direct messages all nodes to increment their counters -- One that intersperses both broadcast and increments. -- One that intersperses both broadcast and increments. -- Two that publishes entries to the DHT and checks that other nodes can access these entries. - -This can fail on MacOS (and linux) due to "too many open files." The fix is: - -```bash -ulimit -n 4096 -``` - -## Counter Multi-machine tests - -In these tests, there are three types of nodes. `Regular` nodes that limit the number of incoming connections, `Bootstrap` nodes that allow all connections, and `Conductor` nodes that all nodes (bootstrap and regular) connect to and periodically ping with their state. This "conductor" node instructs nodes in the swarm to increment their state either via broadcast or direct messages in the same fashion as the single machine tests. - -In the direct message case, the conductor will increment the state of a randomly chosen node, `i`. Then the conductor will direct message all other nodes to request node `i`'s counter and increment their counter to the value in `i`'s node. In the broadcast case, the conductor will increment the state of a randomly chose node, `i`, and tell `i` to broadcast this incremented state. - -In both cases, the test terminates as successful when the conductor receives the incremented state from all other nodes. Then, the conductor sends a special "kill" message to all known nodes and waits for them to disconnect. - -Metadata about the toplogy is currently read from an `identity_mapping.json` file that manually labels the type of node (bootstrap, regular, conductor). 
The conductor uses this to figure out information about all nodes in the network. The regular nodes use this to learn about their ip address and the addresses necessary to bootstrap onto the network. The boostrap nodes only use this to learn about their ip addresses. - -### Running counter multi-machine tests - -A sample invocation locally: - -```bash -# run each line in a separate terminal -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9000 --node_type Bootstrap --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8000 -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9001 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8001 -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9002 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8002 -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9003 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8003 -nix develop -c cargo run --features webui,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9004 --node_type Conductor --num_nodes 5 --bootstrap 127.0.0.1:9000 --webui 127.0.0.1:8004 -``` - -### Network Emulation -One may introduce simulated network latency via the network emulationn queueing discipline. This is implemented in two ways: on what is assumed to be a AWS EC2 instance, and in a docker container. 
Example usage on AWS EC2 instance: - -```bash -# run each line in a separate AWS instance -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9000 --node_type Bootstrap --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9001 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9002 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9003 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9004 --node_type Conductor --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Metal -``` - -And on docker: - -```bash -# run each line in a separate Docker container instance -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9000 --node_type Bootstrap --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9001 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9002 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9003 --node_type Regular --num_nodes 5 --bootstrap 127.0.0.1:9000 
--env Docker -nix develop -c cargo run --features lossy_network,async-std-executor --release --example counter -- --bound_addr 127.0.0.1:9004 --node_type Conductor --num_nodes 5 --bootstrap 127.0.0.1:9000 --env Docker -``` - -On an AWS instance, a separate network namespace is created and connected to `ens5` via a network bridge, and a netem qdisc is introduced to the veth interface in the namespace. Within a docker container, a netem qdisc is added on interface `eth0`. - -### Network Emulation Dockerfile - -Usage: - -``` -docker build . -t libp2p-networking -# expose ports -docker run -P 8000:8000 -P 9000:9000 libp2p-networking -``` - diff --git a/crates/libp2p-networking/src/network/behaviours/dht/bootstrap.rs b/crates/libp2p-networking/src/network/behaviours/dht/bootstrap.rs index 075a70ffa7..566db12d4c 100644 --- a/crates/libp2p-networking/src/network/behaviours/dht/bootstrap.rs +++ b/crates/libp2p-networking/src/network/behaviours/dht/bootstrap.rs @@ -6,8 +6,8 @@ use std::time::Duration; -use async_compatibility_layer::{art, channel::UnboundedSender}; use futures::{channel::mpsc, StreamExt}; +use tokio::{spawn, sync::mpsc::UnboundedSender, time::timeout}; use crate::network::ClientRequest; @@ -33,7 +33,7 @@ pub struct DHTBootstrapTask { impl DHTBootstrapTask { /// Run bootstrap task pub fn run(rx: mpsc::Receiver, tx: UnboundedSender) { - art::async_spawn(async move { + spawn(async move { let state = Self { rx, network_tx: tx, @@ -64,13 +64,12 @@ impl DHTBootstrapTask { break; } } - } else if let Ok(maybe_event) = - art::async_timeout(Duration::from_secs(120), self.rx.next()).await + } else if let Ok(maybe_event) = timeout(Duration::from_secs(120), self.rx.next()).await { match maybe_event { Some(InputEvent::StartBootstrap) => { tracing::debug!("Start bootstrap in bootstrap task"); - self.bootstrap().await; + self.bootstrap(); } Some(InputEvent::ShutdownBootstrap) => { tracing::debug!("ShutdownBootstrap received, shutting down"); @@ -86,13 +85,14 @@ impl 
DHTBootstrapTask { } } else { tracing::debug!("Start bootstrap in bootstrap task after timeout"); - self.bootstrap().await; + self.bootstrap(); } } } + /// Start bootstrap - async fn bootstrap(&mut self) { + fn bootstrap(&mut self) { self.in_progress = true; - let _ = self.network_tx.send(ClientRequest::BeginBootstrap).await; + let _ = self.network_tx.send(ClientRequest::BeginBootstrap); } } diff --git a/crates/libp2p-networking/src/network/behaviours/dht/mod.rs b/crates/libp2p-networking/src/network/behaviours/dht/mod.rs index 27bf8a1b71..f6923dbcb7 100644 --- a/crates/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/crates/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -13,7 +13,6 @@ use std::{ time::Duration, }; -use async_compatibility_layer::{art, channel::UnboundedSender}; /// a local caching layer for the DHT key value pairs use futures::{ channel::{mpsc, oneshot::Sender}, @@ -30,6 +29,7 @@ use libp2p::kad::{ }; use libp2p_identity::PeerId; use store::ValidatedStore; +use tokio::{spawn, sync::mpsc::UnboundedSender, time::sleep}; use tracing::{debug, error, warn}; /// Additional DHT record functionality @@ -222,9 +222,9 @@ impl DHTBehaviour { retry_count: query.retry_count, }; let backoff = query.backoff.next_timeout(false); - art::async_spawn(async move { - art::async_sleep(backoff).await; - let _ = tx.send(req).await; + spawn(async move { + sleep(backoff).await; + let _ = tx.send(req); }); } @@ -238,9 +238,9 @@ impl DHTBehaviour { value: query.value, notify: query.notify, }; - art::async_spawn(async move { - art::async_sleep(query.backoff.next_timeout(false)).await; - let _ = tx.send(req).await; + spawn(async move { + sleep(query.backoff.next_timeout(false)).await; + let _ = tx.send(req); }); } @@ -397,9 +397,7 @@ impl DHTBehaviour { /// Send that the bootsrap suceeded fn finish_bootstrap(&mut self) { if let Some(mut tx) = self.bootstrap_tx.clone() { - art::async_spawn( - async move { tx.send(bootstrap::InputEvent::BootstrapFinished).await 
}, - ); + spawn(async move { tx.send(bootstrap::InputEvent::BootstrapFinished).await }); } } #[allow(clippy::too_many_lines)] diff --git a/crates/libp2p-networking/src/network/behaviours/direct_message.rs b/crates/libp2p-networking/src/network/behaviours/direct_message.rs index 61f64b8b91..72d378a587 100644 --- a/crates/libp2p-networking/src/network/behaviours/direct_message.rs +++ b/crates/libp2p-networking/src/network/behaviours/direct_message.rs @@ -6,12 +6,9 @@ use std::collections::HashMap; -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::UnboundedSender, -}; use libp2p::request_response::{Event, Message, OutboundRequestId, ResponseChannel}; use libp2p_identity::PeerId; +use tokio::{spawn, sync::mpsc::UnboundedSender, time::sleep}; use tracing::{debug, error, warn}; use super::exponential_backoff::ExponentialBackoff; @@ -75,15 +72,13 @@ impl DMBehaviour { } req.retry_count -= 1; if let Some(retry_tx) = retry_tx { - async_spawn(async move { - async_sleep(req.backoff.next_timeout(false)).await; - let _ = retry_tx - .send(ClientRequest::DirectRequest { - pid: peer, - contents: req.data, - retry_count: req.retry_count, - }) - .await; + spawn(async move { + sleep(req.backoff.next_timeout(false)).await; + let _ = retry_tx.send(ClientRequest::DirectRequest { + pid: peer, + contents: req.data, + retry_count: req.retry_count, + }); }); } } diff --git a/crates/libp2p-networking/src/network/cbor.rs b/crates/libp2p-networking/src/network/cbor.rs index a289b998b5..4a5685624b 100644 --- a/crates/libp2p-networking/src/network/cbor.rs +++ b/crates/libp2p-networking/src/network/cbor.rs @@ -1,3 +1,5 @@ +use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; + use async_trait::async_trait; use cbor4ii::core::error::DecodeError; use futures::prelude::*; @@ -6,7 +8,6 @@ use libp2p::{ StreamProtocol, }; use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::TryReserveError, convert::Infallible, io, 
marker::PhantomData}; /// `Behaviour` type alias for the `Cbor` codec pub type Behaviour = request_response::Behaviour>; diff --git a/crates/libp2p-networking/src/network/mod.rs b/crates/libp2p-networking/src/network/mod.rs index dea939556a..21a2811bb8 100644 --- a/crates/libp2p-networking/src/network/mod.rs +++ b/crates/libp2p-networking/src/network/mod.rs @@ -19,14 +19,11 @@ pub mod cbor; use std::{collections::HashSet, fmt::Debug}; use futures::channel::oneshot::Sender; -use hotshot_types::traits::{network::NetworkError, signature_key::SignatureKey}; -#[cfg(async_executor_impl = "async-std")] -use libp2p::dns::async_std::Transport as DnsTransport; -#[cfg(async_executor_impl = "tokio")] -use libp2p::dns::tokio::Transport as DnsTransport; +use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; use libp2p::{ build_multiaddr, core::{muxing::StreamMuxerBox, transport::Boxed}, + dns::tokio::Transport as DnsTransport, gossipsub::Event as GossipEvent, identify::Event as IdentifyEvent, identity::Keypair, @@ -35,9 +32,6 @@ use libp2p::{ Multiaddr, Transport, }; use libp2p_identity::PeerId; -#[cfg(async_executor_impl = "async-std")] -use quic::async_std::Transport as QuicTransport; -#[cfg(async_executor_impl = "tokio")] use quic::tokio::Transport as QuicTransport; use tracing::instrument; use transport::StakeTableAuthentication; @@ -50,8 +44,6 @@ pub use self::{ RequestResponseConfig, DEFAULT_REPLICATION_FACTOR, }, }; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! 
{"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} /// Actions to send from the client to the swarm #[derive(Debug)] @@ -165,9 +157,9 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; /// # Errors /// If we could not create a DNS transport #[instrument(skip(identity))] -pub async fn gen_transport( +pub async fn gen_transport( identity: Keypair, - stake_table: Option>, + stake_table: Option, auth_message: Option>, ) -> Result { // Create the initial `Quic` transport @@ -178,16 +170,11 @@ pub async fn gen_transport( }; // Require authentication against the stake table - let transport = StakeTableAuthentication::new(transport, stake_table, auth_message); + let transport: StakeTableAuthentication<_, T, _> = + StakeTableAuthentication::new(transport, stake_table, auth_message); // Support DNS resolution let transport = { - #[cfg(async_executor_impl = "async-std")] - { - DnsTransport::system(transport).await - } - - #[cfg(async_executor_impl = "tokio")] { DnsTransport::system(transport) } diff --git a/crates/libp2p-networking/src/network/node.rs b/crates/libp2p-networking/src/network/node.rs index c3ec3f6316..7bf55ccc86 100644 --- a/crates/libp2p-networking/src/network/node.rs +++ b/crates/libp2p-networking/src/network/node.rs @@ -14,18 +14,13 @@ mod handle; use std::{ collections::{HashMap, HashSet}, iter, - marker::PhantomData, num::{NonZeroU32, NonZeroUsize}, time::Duration, }; -use async_compatibility_layer::{ - art::async_spawn, - channel::{unbounded, UnboundedReceiver, UnboundedRecvError, UnboundedSender}, -}; -use futures::{channel::mpsc, select, FutureExt, SinkExt, StreamExt}; +use futures::{channel::mpsc, SinkExt, StreamExt}; use hotshot_types::{ - constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, traits::signature_key::SignatureKey, + constants::KAD_DEFAULT_REPUB_INTERVAL_SEC, traits::node_implementation::NodeType, }; use libp2p::{ autonat, @@ -48,6 +43,10 @@ use libp2p::{ }; use libp2p_identity::PeerId; use 
rand::{prelude::SliceRandom, thread_rng}; +use tokio::{ + select, spawn, + sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, +}; use tracing::{debug, error, info, info_span, instrument, warn, Instrument}; pub use self::{ @@ -83,32 +82,29 @@ pub const ESTABLISHED_LIMIT_UNWR: u32 = 10; /// Network definition #[derive(custom_debug::Debug)] -pub struct NetworkNode { +pub struct NetworkNode { /// The keypair for the node keypair: Keypair, /// peer id of network node peer_id: PeerId, /// the swarm of networkbehaviours #[debug(skip)] - swarm: Swarm>, + swarm: Swarm>, /// the configuration parameters of the netework - config: NetworkNodeConfig, + config: NetworkNodeConfig, /// the listener id we are listening on, if it exists listener_id: Option, /// Handler for direct messages direct_message_state: DMBehaviour, /// Handler for DHT Events - dht_handler: DHTBehaviour, + dht_handler: DHTBehaviour, /// Channel to resend requests, set to Some when we call `spawn_listeners` resend_tx: Option>, /// Send to the bootstrap task to tell it to start a bootstrap bootstrap_tx: Option>, - - /// Phantom data to hold the key type - pd: PhantomData, } -impl NetworkNode { +impl NetworkNode { /// Returns number of peers this node is connected to pub fn num_connected(&self) -> usize { self.swarm.connected_peers().count() @@ -168,7 +164,7 @@ impl NetworkNode { /// * Generates a connection to the "broadcast" topic /// * Creates a swarm to manage peers and events #[instrument] - pub async fn new(config: NetworkNodeConfig) -> Result { + pub async fn new(config: NetworkNodeConfig) -> Result { // Generate a random `KeyPair` if one is not specified let keypair = config .keypair @@ -179,7 +175,7 @@ impl NetworkNode { let peer_id = PeerId::from(keypair.public()); // Generate the transport from the keypair, stake table, and auth message - let transport: BoxedTransport = gen_transport::( + let transport: BoxedTransport = gen_transport::( keypair.clone(), config.stake_table.clone(), 
config.auth_message.clone(), @@ -187,7 +183,7 @@ impl NetworkNode { .await?; // Generate the swarm - let mut swarm: Swarm> = { + let mut swarm: Swarm> = { // Use the `Blake3` hash of the message's contents as the ID let message_id_fn = |message: &GossipsubMessage| { let hash = blake3::hash(&message.data); @@ -304,9 +300,6 @@ impl NetworkNode { // build swarm let swarm = SwarmBuilder::with_existing_identity(keypair.clone()); - #[cfg(async_executor_impl = "async-std")] - let swarm = swarm.with_async_std(); - #[cfg(async_executor_impl = "tokio")] let swarm = swarm.with_tokio(); swarm @@ -337,7 +330,6 @@ impl NetworkNode { ), resend_tx: None, bootstrap_tx: None, - pd: PhantomData, }) } @@ -382,11 +374,11 @@ impl NetworkNode { #[instrument(skip(self))] async fn handle_client_requests( &mut self, - msg: Result, + msg: Option, ) -> Result { let behaviour = self.swarm.behaviour_mut(); match msg { - Ok(msg) => { + Some(msg) => { match msg { ClientRequest::BeginBootstrap => { debug!("Beginning Libp2p bootstrap"); @@ -496,8 +488,8 @@ impl NetworkNode { } } } - Err(e) => { - error!("Error receiving msg in main behaviour loop: {:?}", e); + None => { + error!("Error receiving msg in main behaviour loop: channel closed"); } } Ok(false) @@ -539,7 +531,6 @@ impl NetworkNode { // Send the number of connected peers to the client send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) - .await .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } SwarmEvent::ConnectionClosed { @@ -564,7 +555,6 @@ impl NetworkNode { // Send the number of connected peers to the client send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) - .await .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } SwarmEvent::Dialing { @@ -673,7 +663,6 @@ impl NetworkNode { // forward messages directly to Client send_to_client .send(event) - .await .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } } @@ -730,16 +719,15 @@ impl 
NetworkNode { ), NetworkError, > { - let (s_input, s_output) = unbounded::(); - let (r_input, r_output) = unbounded::(); + let (s_input, mut s_output) = unbounded_channel::(); + let (r_input, r_output) = unbounded_channel::(); let (mut bootstrap_tx, bootstrap_rx) = mpsc::channel(100); self.resend_tx = Some(s_input.clone()); self.dht_handler.set_bootstrap_sender(bootstrap_tx.clone()); DHTBootstrapTask::run(bootstrap_rx, s_input.clone()); - async_spawn( + spawn( async move { - let mut fuse = s_output.recv().boxed().fuse(); loop { select! { event = self.swarm.next() => { @@ -749,14 +737,13 @@ impl NetworkNode { self.handle_swarm_events(event, &r_input).await?; } }, - msg = fuse => { + msg = s_output.recv() => { debug!("peerid {:?}\t\thandling msg {:?}", self.peer_id, msg); let shutdown = self.handle_client_requests(msg).await?; if shutdown { let _ = bootstrap_tx.send(InputEvent::ShutdownBootstrap).await; break } - fuse = s_output.recv().boxed().fuse(); } } } diff --git a/crates/libp2p-networking/src/network/node/config.rs b/crates/libp2p-networking/src/network/node/config.rs index db9d5c13ad..2dfaaa3639 100644 --- a/crates/libp2p-networking/src/network/node/config.rs +++ b/crates/libp2p-networking/src/network/node/config.rs @@ -6,7 +6,7 @@ use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; -use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_types::traits::node_implementation::NodeType; use libp2p::{identity::Keypair, Multiaddr}; use libp2p_identity::PeerId; @@ -17,7 +17,7 @@ pub const DEFAULT_REPLICATION_FACTOR: Option = NonZeroUsize::new(1 /// describe the configuration of the network #[derive(Clone, Default, derive_builder::Builder, custom_debug::Debug)] -pub struct NetworkNodeConfig { +pub struct NetworkNodeConfig { /// The keypair for the node #[builder(setter(into, strip_option), default)] #[debug(skip)] @@ -49,7 +49,7 @@ pub struct NetworkNodeConfig { /// The stake table. Used for authenticating other nodes. 
If not supplied /// we will not check other nodes against the stake table #[builder(default)] - pub stake_table: Option>, + pub stake_table: Option, /// The signed authentication message sent to the remote peer /// If not supplied we will not send an authentication message during the handshake diff --git a/crates/libp2p-networking/src/network/node/handle.rs b/crates/libp2p-networking/src/network/node/handle.rs index 3ec68665f2..b7a6832286 100644 --- a/crates/libp2p-networking/src/network/node/handle.rs +++ b/crates/libp2p-networking/src/network/node/handle.rs @@ -4,15 +4,15 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{collections::HashSet, fmt::Debug, marker::PhantomData, time::Duration}; +use std::{collections::HashSet, fmt::Debug, time::Duration}; -use async_compatibility_layer::{ - art::{async_sleep, async_timeout}, - channel::{Receiver, UnboundedReceiver, UnboundedSender}, -}; -use hotshot_types::traits::{network::NetworkError, signature_key::SignatureKey}; +use hotshot_types::traits::{network::NetworkError, node_implementation::NodeType}; use libp2p::{request_response::ResponseChannel, Multiaddr}; use libp2p_identity::PeerId; +use tokio::{ + sync::mpsc::{Receiver, UnboundedReceiver, UnboundedSender}, + time::{sleep, timeout}, +}; use tracing::{debug, info, instrument}; use crate::network::{ @@ -24,9 +24,9 @@ use crate::network::{ /// - A reference to the state /// - Controls for the swarm #[derive(Debug, Clone)] -pub struct NetworkNodeHandle { +pub struct NetworkNodeHandle { /// network configuration - network_config: NetworkNodeConfig, + network_config: NetworkNodeConfig, /// send an action to the networkbehaviour send_network: UnboundedSender, @@ -39,9 +39,6 @@ pub struct NetworkNodeHandle { /// human readable id id: usize, - - /// Phantom data to hold the key type - pd: PhantomData, } /// internal network node receiver @@ -58,11 +55,13 @@ impl NetworkNodeReceiver { /// recv a network 
event /// # Errors /// Errors if the receiver channel is closed - pub async fn recv(&self) -> Result { + pub async fn recv(&mut self) -> Result { self.receiver .recv() .await - .map_err(|e| NetworkError::ChannelReceiveError(e.to_string())) + .ok_or(NetworkError::ChannelReceiveError( + "Receiver channel closed".to_string(), + )) } /// Add a kill switch to the receiver pub fn set_kill_switch(&mut self, kill_switch: Receiver<()>) { @@ -78,10 +77,10 @@ impl NetworkNodeReceiver { /// Spawn a network node task task and return the handle and the receiver for it /// # Errors /// Errors if spawning the task fails -pub async fn spawn_network_node( - config: NetworkNodeConfig, +pub async fn spawn_network_node( + config: NetworkNodeConfig, id: usize, -) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkError> { +) -> Result<(NetworkNodeReceiver, NetworkNodeHandle), NetworkError> { let mut network = NetworkNode::new(config.clone()) .await .map_err(|e| NetworkError::ConfigError(format!("failed to create network node: {e}")))?; @@ -104,33 +103,32 @@ pub async fn spawn_network_node( recv_kill: None, }; - let handle = NetworkNodeHandle:: { + let handle = NetworkNodeHandle:: { network_config: config, send_network: send_chan, listen_addr, peer_id, id, - pd: PhantomData, }; Ok((receiver, handle)) } -impl NetworkNodeHandle { +impl NetworkNodeHandle { /// Cleanly shuts down a swarm node /// This is done by sending a message to /// the swarm itself to spin down #[instrument] pub async fn shutdown(&self) -> Result<(), NetworkError> { - self.send_request(ClientRequest::Shutdown).await?; + self.send_request(ClientRequest::Shutdown)?; Ok(()) } /// Notify the network to begin the bootstrap process /// # Errors /// If unable to send via `send_network`. This should only happen /// if the network is shut down. 
- pub async fn begin_bootstrap(&self) -> Result<(), NetworkError> { + pub fn begin_bootstrap(&self) -> Result<(), NetworkError> { let req = ClientRequest::BeginBootstrap; - self.send_request(req).await + self.send_request(req) } /// Get a reference to the network node handle's listen addr. @@ -146,7 +144,7 @@ impl NetworkNodeHandle { pub async fn print_routing_table(&self) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetRoutingTable(s); - self.send_request(req).await?; + self.send_request(req)?; r.await .map_err(|e| NetworkError::ChannelReceiveError(e.to_string())) } @@ -174,7 +172,7 @@ impl NetworkNodeHandle { ); // Sleep for a second before checking again - async_sleep(Duration::from_secs(1)).await; + sleep(Duration::from_secs(1)).await; } Ok(()) @@ -187,7 +185,7 @@ impl NetworkNodeHandle { pub async fn lookup_pid(&self, peer_id: PeerId) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::LookupPeer(peer_id, s); - self.send_request(req).await?; + self.send_request(req)?; r.await .map_err(|err| NetworkError::ChannelReceiveError(err.to_string())) } @@ -217,7 +215,7 @@ impl NetworkNodeHandle { pub async fn put_record( &self, key: RecordKey, - value: RecordValue, + value: RecordValue, ) -> Result<(), NetworkError> { // Serialize the key let key = key.to_bytes(); @@ -233,7 +231,7 @@ impl NetworkNodeHandle { notify: s, }; - self.send_request(req).await?; + self.send_request(req)?; r.await.map_err(|_| NetworkError::RequestCancelled) } @@ -257,13 +255,13 @@ impl NetworkNodeHandle { notify: s, retry_count, }; - self.send_request(req).await?; + self.send_request(req)?; // Map the error let result = r.await.map_err(|_| NetworkError::RequestCancelled)?; // Deserialize the record's value - let record: RecordValue = bincode::deserialize(&result) + let record: RecordValue = bincode::deserialize(&result) .map_err(|e| 
NetworkError::FailedToDeserialize(e.to_string()))?; Ok(record.value().to_vec()) @@ -277,9 +275,9 @@ impl NetworkNodeHandle { pub async fn get_record_timeout( &self, key: RecordKey, - timeout: Duration, + timeout_duration: Duration, ) -> Result, NetworkError> { - async_timeout(timeout, self.get_record(key, 3)) + timeout(timeout_duration, self.get_record(key, 3)) .await .map_err(|err| NetworkError::Timeout(err.to_string()))? } @@ -292,10 +290,10 @@ impl NetworkNodeHandle { pub async fn put_record_timeout( &self, key: RecordKey, - value: RecordValue, - timeout: Duration, + value: RecordValue, + timeout_duration: Duration, ) -> Result<(), NetworkError> { - async_timeout(timeout, self.put_record(key, value)) + timeout(timeout_duration, self.put_record(key, value)) .await .map_err(|err| NetworkError::Timeout(err.to_string()))? } @@ -306,7 +304,7 @@ impl NetworkNodeHandle { pub async fn subscribe(&self, topic: String) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::Subscribe(topic, Some(s)); - self.send_request(req).await?; + self.send_request(req)?; r.await .map_err(|err| NetworkError::ChannelReceiveError(err.to_string())) } @@ -317,7 +315,7 @@ impl NetworkNodeHandle { pub async fn unsubscribe(&self, topic: String) -> Result<(), NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::Unsubscribe(topic, Some(s)); - self.send_request(req).await?; + self.send_request(req)?; r.await .map_err(|err| NetworkError::ChannelReceiveError(err.to_string())) } @@ -326,24 +324,24 @@ impl NetworkNodeHandle { /// e.g. 
maintain their connection /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed - pub async fn ignore_peers(&self, peers: Vec) -> Result<(), NetworkError> { + pub fn ignore_peers(&self, peers: Vec) -> Result<(), NetworkError> { let req = ClientRequest::IgnorePeers(peers); - self.send_request(req).await + self.send_request(req) } /// Make a direct request to `peer_id` containing `msg` /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn direct_request(&self, pid: PeerId, msg: &[u8]) -> Result<(), NetworkError> { - self.direct_request_no_serialize(pid, msg.to_vec()).await + pub fn direct_request(&self, pid: PeerId, msg: &[u8]) -> Result<(), NetworkError> { + self.direct_request_no_serialize(pid, msg.to_vec()) } /// Make a direct request to `peer_id` containing `msg` without serializing /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn direct_request_no_serialize( + pub fn direct_request_no_serialize( &self, pid: PeerId, contents: Vec, @@ -353,20 +351,20 @@ impl NetworkNodeHandle { contents, retry_count: 1, }; - self.send_request(req).await + self.send_request(req) } /// Reply with `msg` to a request over `chan` /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn direct_response( + pub fn direct_response( &self, chan: ResponseChannel>, msg: &[u8], ) -> Result<(), NetworkError> { let req = ClientRequest::DirectResponse(chan, msg.to_vec()); - self.send_request(req).await + self.send_request(req) } /// Forcefully disconnect from a peer @@ 
-376,52 +374,47 @@ impl NetworkNodeHandle { /// # Panics /// If channel errors out /// shouldn't happen. - pub async fn prune_peer(&self, pid: PeerId) -> Result<(), NetworkError> { + pub fn prune_peer(&self, pid: PeerId) -> Result<(), NetworkError> { let req = ClientRequest::Prune(pid); - self.send_request(req).await + self.send_request(req) } /// Gossip a message to peers /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn gossip(&self, topic: String, msg: &[u8]) -> Result<(), NetworkError> { - self.gossip_no_serialize(topic, msg.to_vec()).await + pub fn gossip(&self, topic: String, msg: &[u8]) -> Result<(), NetworkError> { + self.gossip_no_serialize(topic, msg.to_vec()) } /// Gossip a message to peers without serializing /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed /// - Will return [`NetworkError::FailedToSerialize`] when unable to serialize `msg` - pub async fn gossip_no_serialize( - &self, - topic: String, - msg: Vec, - ) -> Result<(), NetworkError> { + pub fn gossip_no_serialize(&self, topic: String, msg: Vec) -> Result<(), NetworkError> { let req = ClientRequest::GossipMsg(topic, msg); - self.send_request(req).await + self.send_request(req) } /// Tell libp2p about known network nodes /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been killed - pub async fn add_known_peers( + pub fn add_known_peers( &self, known_peers: Vec<(PeerId, Multiaddr)>, ) -> Result<(), NetworkError> { debug!("Adding {} known peers", known_peers.len()); let req = ClientRequest::AddKnownPeers(known_peers); - self.send_request(req).await + self.send_request(req) } /// Send a client request to the network /// /// # Errors /// - Will return [`NetworkError::ChannelSendError`] when underlying `NetworkNode` has been 
killed - async fn send_request(&self, req: ClientRequest) -> Result<(), NetworkError> { + fn send_request(&self, req: ClientRequest) -> Result<(), NetworkError> { self.send_network .send(req) - .await .map_err(|err| NetworkError::ChannelSendError(err.to_string())) } @@ -435,7 +428,7 @@ impl NetworkNodeHandle { pub async fn num_connected(&self) -> Result { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetConnectedPeerNum(s); - self.send_request(req).await?; + self.send_request(req)?; Ok(r.await.unwrap()) } @@ -449,7 +442,7 @@ impl NetworkNodeHandle { pub async fn connected_pids(&self) -> Result, NetworkError> { let (s, r) = futures::channel::oneshot::channel(); let req = ClientRequest::GetConnectedPeers(s); - self.send_request(req).await?; + self.send_request(req)?; Ok(r.await.unwrap()) } @@ -467,7 +460,7 @@ impl NetworkNodeHandle { /// Return a reference to the network config #[must_use] - pub fn config(&self) -> &NetworkNodeConfig { + pub fn config(&self) -> &NetworkNodeConfig { &self.network_config } } diff --git a/crates/libp2p-networking/src/network/transport.rs b/crates/libp2p-networking/src/network/transport.rs index ea58db2001..7e5987e306 100644 --- a/crates/libp2p-networking/src/network/transport.rs +++ b/crates/libp2p-networking/src/network/transport.rs @@ -1,7 +1,5 @@ use std::{ - collections::HashSet, future::Future, - hash::BuildHasher, io::{Error as IoError, ErrorKind as IoErrorKind}, pin::Pin, sync::Arc, @@ -9,9 +7,12 @@ use std::{ }; use anyhow::{ensure, Context, Result as AnyhowResult}; -use async_compatibility_layer::art::async_timeout; use futures::{future::poll_fn, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; -use hotshot_types::traits::signature_key::SignatureKey; +use hotshot_types::traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + signature_key::SignatureKey, +}; use libp2p::{ core::{muxing::StreamMuxerExt, transport::TransportEvent, StreamMuxer}, identity::PeerId, @@ 
-19,6 +20,7 @@ use libp2p::{ }; use pin_project::pin_project; use serde::{Deserialize, Serialize}; +use tokio::time::timeout; use tracing::warn; /// The maximum size of an authentication message. This is used to prevent @@ -34,14 +36,13 @@ const AUTH_HANDSHAKE_TIMEOUT: std::time::Duration = std::time::Duration::from_se /// by performing a handshake that checks if the remote peer is present in the /// stake table. #[pin_project] -pub struct StakeTableAuthentication -{ +pub struct StakeTableAuthentication { #[pin] /// The underlying transport we are wrapping pub inner: T, /// The stake table we check against to authenticate connections - pub stake_table: Arc>>, + pub stake_table: Arc>, /// A pre-signed message that we send to the remote peer for authentication pub auth_message: Arc>>, @@ -54,10 +55,14 @@ pub struct StakeTableAuthentication = Pin::Output, ::Error>> + Send>>; -impl StakeTableAuthentication { +impl StakeTableAuthentication { /// Create a new `StakeTableAuthentication` transport that wraps the given transport /// and authenticates connections against the stake table. 
- pub fn new(inner: T, stake_table: Option>, auth_message: Option>) -> Self { + pub fn new( + inner: T, + stake_table: Option, + auth_message: Option>, + ) -> Self { Self { inner, stake_table: Arc::from(stake_table), @@ -98,9 +103,9 @@ impl StakeTableAuthentica /// - The message is invalid /// - The peer is not in the stake table /// - The signature is invalid - pub async fn verify_peer_authentication( + pub async fn verify_peer_authentication( stream: &mut R, - stake_table: Arc>>, + stake_table: Arc>, required_peer_id: &PeerId, ) -> AnyhowResult<()> { // If we have a stake table, check if the remote peer is in it @@ -109,7 +114,7 @@ impl StakeTableAuthentica let message = read_length_delimited(stream, MAX_AUTH_MESSAGE_SIZE).await?; // Deserialize the authentication message - let auth_message: AuthMessage = bincode::deserialize(&message) + let auth_message: AuthMessage = bincode::deserialize(&message) .with_context(|| "Failed to deserialize auth message")?; // Verify the signature on the public keys @@ -127,7 +132,7 @@ impl StakeTableAuthentica } // Check if the public key is in the stake table - if !stake_table.contains(&public_key) { + if !stake_table.has_stake(&public_key, Types::Epoch::new(0)) { return Err(anyhow::anyhow!("Peer not in stake table")); } } @@ -142,7 +147,7 @@ impl StakeTableAuthentica fn gen_handshake> + Send + 'static>( original_future: F, outgoing: bool, - stake_table: Arc>>, + stake_table: Arc>, auth_message: Arc>>, ) -> UpgradeFuture where @@ -157,7 +162,7 @@ impl StakeTableAuthentica let mut stream = original_future.await?; // Time out the authentication block - async_timeout(AUTH_HANDSHAKE_TIMEOUT, async { + timeout(AUTH_HANDSHAKE_TIMEOUT, async { // Open a substream for the handshake. // The handshake order depends on whether the connection is incoming or outgoing. 
let mut substream = if outgoing { @@ -286,8 +291,8 @@ pub fn construct_auth_message( bincode::serialize(&auth_message).with_context(|| "Failed to serialize auth message") } -impl Transport - for StakeTableAuthentication +impl Transport + for StakeTableAuthentication where T::Dial: Future> + Send + 'static, T::ListenerUpgrade: Send + 'static, @@ -513,16 +518,22 @@ pub async fn write_length_delimited( #[cfg(test)] mod test { - use std::{collections::HashSet, sync::Arc}; - - use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; + use std::sync::Arc; + + use hotshot_example_types::node_types::TestTypes; + use hotshot_types::{ + light_client::StateVerKey, + signature_key::BLSPubKey, + traits::{network::Topic, signature_key::SignatureKey}, + PeerConfig, + }; use libp2p::{core::transport::dummy::DummyTransport, quic::Connection}; use rand::Rng; use super::*; /// A mock type to help with readability - type MockStakeTableAuth = StakeTableAuthentication; + type MockStakeTableAuth = StakeTableAuthentication; // Helper macro for generating a new identity and authentication message macro_rules! 
new_identity { @@ -619,8 +630,7 @@ mod test { assert!(public_key.is_err()); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn valid_authentication() { // Create a new identity let (keypair, peer_id, auth_message) = new_identity!(); @@ -629,8 +639,15 @@ mod test { let mut stream = cursor_from!(auth_message); // Create a stake table with the key - let mut stake_table = std::collections::HashSet::new(); - stake_table.insert(keypair.0); + let peer_config = PeerConfig { + stake_table_entry: keypair.0.stake_table_entry(1), + state_ver_key: StateVerKey::default(), + }; + let stake_table = ::Membership::new( + vec![peer_config.clone()], + vec![peer_config], + Topic::Global, + ); // Verify the authentication message let result = MockStakeTableAuth::verify_peer_authentication( @@ -646,8 +663,7 @@ mod test { ); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn key_not_in_stake_table() { // Create a new identity let (_, peer_id, auth_message) = new_identity!(); @@ -656,7 +672,7 @@ mod test { let mut stream = cursor_from!(auth_message); // Create an empty stake table - let stake_table: HashSet = std::collections::HashSet::new(); + let stake_table = ::Membership::new(vec![], vec![], Topic::Global); // Verify the authentication message let result = MockStakeTableAuth::verify_peer_authentication( @@ -676,8 +692,7 @@ mod test { ); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn peer_id_mismatch() { // Create a new identity and authentication message let (keypair, _, auth_message) = new_identity!(); @@ -689,8 +704,15 @@ mod test { 
let mut stream = cursor_from!(auth_message); // Create a stake table with the key - let mut stake_table: HashSet = std::collections::HashSet::new(); - stake_table.insert(keypair.0); + let peer_config = PeerConfig { + stake_table_entry: keypair.0.stake_table_entry(1), + state_ver_key: StateVerKey::default(), + }; + let stake_table = ::Membership::new( + vec![peer_config.clone()], + vec![peer_config], + Topic::Global, + ); // Check against the malicious peer ID let result = MockStakeTableAuth::verify_peer_authentication( @@ -710,8 +732,7 @@ mod test { ); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn read_and_write_length_delimited() { // Create a message let message = b"Hello, world!"; diff --git a/crates/libp2p-networking/test.py b/crates/libp2p-networking/test.py deleted file mode 100755 index 66368b297e..0000000000 --- a/crates/libp2p-networking/test.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python3 - -from enum import Enum -from functools import reduce -from typing import Final -from subprocess import run, Popen -from time import sleep -from os import environ - -class NodeType(Enum): - CONDUCTOR = "Conductor" - REGULAR = "Regular" - BOOTSTRAP = "Bootstrap" - -def gen_invocation( - node_type: NodeType, - num_nodes: int, - to_connect_addrs: list[str], - conductor_addr: str, - num_rounds: int, - bound_addr: str, - ) -> tuple[list[str], str]: - aggr_list = lambda x, y: f'{x},{y}' - to_connect_list : Final[str] = reduce(aggr_list, to_connect_addrs); - out_file_name : Final[str] = f'out_{node_type}_{bound_addr[-4:]}'; - fmt_cmd = [ - f'cargo run --no-default-features --features=async-std-executor --example=counter --profile=release-lto -- ' \ - f' --bound_addr={bound_addr} '\ - f' --node_type={node_type.value} '\ - f' --num_nodes={num_nodes} '\ - f' --num_gossip={num_rounds} '\ - f' 
--to_connect_addrs={to_connect_list} '\ - f' --conductor_addr={conductor_addr} ']; - return (fmt_cmd, out_file_name) - -# construct a map: - -if __name__ == "__main__": - # cleanup - - run("rm -f out_*".split()) - - - # params - START_PORT : Final[int] = 9100; - NUM_REGULAR_NODES : Final[int] = 100; - NUM_NODES_PER_BOOTSTRAP : Final[int] = 10; - NUM_BOOTSTRAP : Final[int] = (int) (NUM_REGULAR_NODES / NUM_NODES_PER_BOOTSTRAP); - TOTAL_NUM_NODES: Final[int] = NUM_BOOTSTRAP + NUM_REGULAR_NODES + 1; - NUM_ROUNDS = 100; - - bootstrap_addrs : Final[list[str]] = list(map(lambda x: f'127.0.0.1:{x + START_PORT}', range(0, NUM_BOOTSTRAP))); - normal_nodes_addrs : Final[list[str]] = list(map(lambda x: f'127.0.0.1:{x + START_PORT + NUM_BOOTSTRAP}', range(0, NUM_REGULAR_NODES))); - conductor_addr : str = f'127.0.0.1:{START_PORT + NUM_BOOTSTRAP + NUM_REGULAR_NODES + 1}'; - - regular_cmds : list[tuple[list[str], str]] = []; - bootstrap_cmds : list[tuple[list[str], str]] = []; - print("doing conductor") - conductor_cmd : Final[tuple[list[str], str]] = \ - gen_invocation( - node_type=NodeType.CONDUCTOR, - num_nodes=TOTAL_NUM_NODES, - to_connect_addrs=bootstrap_addrs + normal_nodes_addrs,# + normal_nodes_addrs + [conductor_addr], - conductor_addr=conductor_addr, - num_rounds=NUM_ROUNDS, - bound_addr=conductor_addr - ); - print("dfone concuctor") - - for i in range(0, len(bootstrap_addrs)): - bootstrap_addr = bootstrap_addrs[i]; - regulars_list = normal_nodes_addrs[i * NUM_NODES_PER_BOOTSTRAP: (i + 1) * NUM_NODES_PER_BOOTSTRAP]; - - bootstrap_cmd = gen_invocation( - node_type=NodeType.BOOTSTRAP, - num_nodes=TOTAL_NUM_NODES, - to_connect_addrs=bootstrap_addrs, - conductor_addr=conductor_addr, - num_rounds=NUM_ROUNDS, - bound_addr=bootstrap_addr, - ); - bootstrap_cmds.append(bootstrap_cmd); - - for regular_addr in regulars_list: - regular_cmd = gen_invocation( - node_type=NodeType.REGULAR, - num_nodes=TOTAL_NUM_NODES, - # NOTE may need to remove regular_addr from regulars_list - 
to_connect_addrs= [bootstrap_addr], - num_rounds=NUM_ROUNDS, - bound_addr=regular_addr, - conductor_addr=conductor_addr - ); - regular_cmds.append(regular_cmd); - - print(regular_cmds) - - TIME_TO_SPIN_UP_BOOTSTRAP : Final[int] = 0; - TIME_TO_SPIN_UP_REGULAR : Final[int] = 0; - env = environ.copy(); - env["RUST_BACKTRACE"] = "full" - - print("spinning up bootstrap") - for (node_cmd, file_name) in bootstrap_cmds: - print("running bootstrap", file_name) - file = open(file_name, 'w') - Popen(node_cmd[0].split(), start_new_session=True, stdout=file, stderr=file, env=env); - - sleep(TIME_TO_SPIN_UP_BOOTSTRAP); - - print("spinning up regulars") - for (node_cmd, file_name) in regular_cmds: - file = open(file_name, 'w') - Popen(node_cmd[0].split(), start_new_session=True, stdout=file, stderr=file, env=env); - - sleep(TIME_TO_SPIN_UP_REGULAR); - - file = open(conductor_cmd[1], 'w') - print("spinning up conductor") - Popen(conductor_cmd[0][0].split(), start_new_session=True, stdout=file, stderr=file, env=env); - diff --git a/crates/libp2p-networking/tests/common/mod.rs b/crates/libp2p-networking/tests/common/mod.rs deleted file mode 100644 index c4620baf9e..0000000000 --- a/crates/libp2p-networking/tests/common/mod.rs +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . 
- -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, - num::NonZeroUsize, - str::FromStr, - sync::Arc, - time::Duration, -}; - -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - async_primitives::subscribable_mutex::SubscribableMutex, - channel::bounded, - logging::{setup_backtrace, setup_logging}, -}; -use futures::{future::join_all, Future, FutureExt}; -use hotshot_types::traits::{network::NetworkError, signature_key::SignatureKey}; -use libp2p::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_networking::network::{ - spawn_network_node, NetworkEvent, NetworkNodeConfigBuilder, NetworkNodeHandle, - NetworkNodeReceiver, -}; -use thiserror::Error; -use tracing::{instrument, warn}; - -#[derive(Clone, Debug)] -pub(crate) struct HandleWithState { - pub(crate) handle: Arc>, - pub(crate) state: Arc>, -} - -/// Spawn a handler `F` that will be notified every time a new [`NetworkEvent`] arrives. -/// -/// # Panics -/// -/// Will panic if a handler is already spawned -pub fn spawn_handler( - handle_and_state: HandleWithState, - mut receiver: NetworkNodeReceiver, - cb: F, -) -> impl Future -where - F: Fn(NetworkEvent, HandleWithState) -> RET + Sync + Send + 'static, - RET: Future> + Send + 'static, - S: Debug + Default + Send + Clone + 'static, -{ - async_spawn(async move { - let Some(mut kill_switch) = receiver.take_kill_switch() else { - tracing::error!( - "`spawn_handle` was called on a network handle that was already closed" - ); - return; - }; - let mut next_msg = receiver.recv().boxed(); - let mut kill_switch = kill_switch.recv().boxed(); - loop { - match futures::future::select(next_msg, kill_switch).await { - futures::future::Either::Left((incoming_message, other_stream)) => { - let incoming_message = match incoming_message { - Ok(msg) => msg, - Err(e) => { - tracing::warn!(?e, "NetworkNodeHandle::spawn_handle was unable to receive more messages"); - return; - } - }; - if let Err(e) = cb(incoming_message, 
handle_and_state.clone()).await { - tracing::error!(?e, "NetworkNodeHandle::spawn_handle returned an error"); - return; - } - - // re-set the `kill_switch` for the next loop - kill_switch = other_stream; - // re-set `receiver.recv()` for the next loop - next_msg = receiver.recv().boxed(); - } - futures::future::Either::Right(_) => { - return; - } - } - } - }) -} - -/// General function to spin up testing infra -/// perform tests by calling `run_test` -/// then cleans up tests -/// # Panics -/// Panics if unable to: -/// - Initialize logging -/// - Initialize network nodes -/// - Kill network nodes -/// - A test assertion fails -pub async fn test_bed< - S: 'static + Send + Default + Debug + Clone, - F, - FutF, - G, - FutG, - K: SignatureKey + 'static, ->( - run_test: F, - client_handler: G, - num_nodes: usize, - timeout: Duration, -) where - FutF: Future, - FutG: Future> + 'static + Send + Sync, - F: FnOnce(Vec>, Duration) -> FutF, - G: Fn(NetworkEvent, HandleWithState) -> FutG + 'static + Send + Sync + Clone, -{ - setup_logging(); - setup_backtrace(); - - let mut kill_switches = Vec::new(); - // NOTE we want this to panic if we can't spin up the swarms. - // that amounts to a failed test. 
- let handles_and_receivers = spin_up_swarms::(num_nodes, timeout).await.unwrap(); - - let (handles, receivers): (Vec<_>, Vec<_>) = handles_and_receivers.into_iter().unzip(); - let mut handler_futures = Vec::new(); - for (i, mut rx) in receivers.into_iter().enumerate() { - let (kill_tx, kill_rx) = bounded(1); - let handle = &handles[i]; - kill_switches.push(kill_tx); - rx.set_kill_switch(kill_rx); - let handler_fut = spawn_handler(handle.clone(), rx, client_handler.clone()); - handler_futures.push(handler_fut); - } - - run_test(handles.clone(), timeout).await; - - // cleanup - for handle in handles { - handle.handle.shutdown().await.unwrap(); - } - for switch in kill_switches { - let _ = switch.send(()).await; - } - - for fut in handler_futures { - fut.await; - } -} - -fn gen_peerid_map( - handles: &[Arc>], -) -> HashMap { - let mut r_val = HashMap::new(); - for handle in handles { - r_val.insert(handle.peer_id(), handle.id()); - } - r_val -} - -/// print the connections for each handle in `handles` -/// useful for debugging -pub async fn print_connections(handles: &[Arc>]) { - let m = gen_peerid_map(handles); - warn!("PRINTING CONNECTION STATES"); - for handle in handles { - warn!( - "peer {}, connected to {:?}", - handle.id(), - handle - .connected_pids() - .await - .unwrap() - .iter() - .map(|pid| m.get(pid).unwrap()) - .collect::>() - ); - } -} - -/// Spins up `num_of_nodes` nodes, connects them to each other -/// and waits for connections to propagate to all nodes. 
-#[allow(clippy::type_complexity)] -#[instrument] -pub async fn spin_up_swarms( - num_of_nodes: usize, - timeout_len: Duration, -) -> Result, NetworkNodeReceiver)>, TestError> { - let mut handles = Vec::new(); - let mut node_addrs = Vec::<(PeerId, Multiaddr)>::new(); - let mut connecting_futs = Vec::new(); - // should never panic unless num_nodes is 0 - let replication_factor = NonZeroUsize::new(num_of_nodes - 1).unwrap(); - - for i in 0..num_of_nodes { - // Get an unused port - let port = portpicker::pick_unused_port().expect("Failed to get an unused port"); - - // Use the port to create a Multiaddr - let addr = - Multiaddr::from_str(format!("/ip4/127.0.0.1/udp/{port}/quic-v1").as_str()).unwrap(); - - let config = NetworkNodeConfigBuilder::default() - .replication_factor(replication_factor) - .bind_address(Some(addr.clone())) - .to_connect_addrs(HashSet::default()) - .build() - .map_err(|e| TestError::ConfigError(format!("failed to build network node: {e}")))?; - - let (rx, node) = spawn_network_node(config.clone(), i).await.unwrap(); - - // Add ourselves to the list of node addresses to connect to - node_addrs.push((node.peer_id(), addr)); - - let node = Arc::new(node); - connecting_futs.push({ - let node = Arc::clone(&node); - async move { - node.begin_bootstrap().await?; - node.lookup_pid(PeerId::random()).await - } - .boxed_local() - }); - let node_with_state = HandleWithState { - handle: Arc::clone(&node), - state: Arc::default(), - }; - handles.push((node_with_state, rx)); - } - - for (handle, _) in &handles[0..num_of_nodes] { - let to_share = node_addrs.clone(); - handle - .handle - .add_known_peers(to_share) - .await - .map_err(|e| TestError::HandleError(format!("failed to add known peers: {e}")))?; - } - - let res = join_all(connecting_futs.into_iter()).await; - let mut failing_nodes = Vec::new(); - for (idx, a_node) in res.iter().enumerate() { - if a_node.is_err() { - failing_nodes.push(idx); - } - } - if !failing_nodes.is_empty() { - return 
Err(TestError::Timeout(failing_nodes, "spinning up".to_string())); - } - - for (handle, _) in &handles { - handle - .handle - .subscribe("global".to_string()) - .await - .map_err(|e| TestError::HandleError(format!("failed to subscribe: {e}")))?; - } - - async_sleep(Duration::from_secs(5)).await; - - Ok(handles) -} - -#[derive(Debug, Error)] -pub enum TestError { - #[error("Error with network node handle: {0}")] - HandleError(String), - - #[error("Configuration error: {0}")] - ConfigError(String), - - #[error("The following nodes timed out: {0:?} while {1}")] - Timeout(Vec, String), - - #[error( - "Inconsistent state while running test. Expected {expected:?}, got {actual:?} on node {id}" - )] - InconsistentState { id: usize, expected: S, actual: S }, -} diff --git a/crates/libp2p-networking/tests/counter.rs b/crates/libp2p-networking/tests/counter.rs deleted file mode 100644 index 6e1214c547..0000000000 --- a/crates/libp2p-networking/tests/counter.rs +++ /dev/null @@ -1,722 +0,0 @@ -// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) -// This file is part of the HotShot repository. - -// You should have received a copy of the MIT License -// along with the HotShot repository. If not, see . 
- -#![allow(clippy::panic)] - -mod common; -use std::{fmt::Debug, sync::Arc, time::Duration}; - -use async_compatibility_layer::art::{async_sleep, async_spawn}; -use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::prelude::StreamExt; -use common::{test_bed, HandleWithState, TestError}; -use hotshot_types::{ - signature_key::BLSPubKey, - traits::{network::NetworkError, signature_key::SignatureKey}, -}; -use libp2p_networking::network::{ - behaviours::dht::record::{Namespace, RecordKey, RecordValue}, - NetworkEvent, -}; -use rand::{rngs::StdRng, seq::IteratorRandom, Rng, SeedableRng}; -use serde::{Deserialize, Serialize}; -#[cfg(async_executor_impl = "tokio")] -use tokio_stream::StreamExt; -use tracing::{debug, error, info, instrument, warn}; - -use crate::common::print_connections; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} - -pub type CounterState = u32; - -const NUM_ROUNDS: usize = 100; - -const TOTAL_NUM_PEERS_COVERAGE: usize = 10; -const TIMEOUT_COVERAGE: Duration = Duration::from_secs(120); - -const TOTAL_NUM_PEERS_STRESS: usize = 100; -const TIMEOUT_STRESS: Duration = Duration::from_secs(60); - -const DHT_KV_PADDING: usize = 1024; - -/// Message types. 
We can either -/// - increment the Counter -/// - request a counter value -/// - reply with a counter value -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub enum CounterMessage { - IncrementCounter { - from: CounterState, - to: CounterState, - }, - AskForCounter, - MyCounterIs(CounterState), - Noop, -} - -/// Given a slice of handles assumed to be larger than 0, -/// chooses one -/// # Panics -/// panics if handles is of length 0 -fn random_handle( - handles: &[HandleWithState], - rng: &mut dyn rand::RngCore, -) -> HandleWithState { - handles.iter().choose(rng).unwrap().clone() -} - -/// event handler for events from the swarm -/// - updates state based on events received -/// - replies to direct messages -#[instrument] -pub async fn counter_handle_network_event( - event: NetworkEvent, - handle: HandleWithState, -) -> Result<(), NetworkError> { - use CounterMessage::*; - use NetworkEvent::*; - match event { - IsBootstrapped | NetworkEvent::ConnectedPeersUpdate(..) => {} - GossipMsg(m) | DirectResponse(m, _) => { - if let Ok(msg) = bincode::deserialize::(&m) { - match msg { - // direct message only - MyCounterIs(c) => { - handle.state.modify(|s| *s = c).await; - } - // gossip message only - IncrementCounter { from, to, .. } => { - handle - .state - .modify(|s| { - if *s == from { - *s = to; - } - }) - .await; - } - // only as a response - AskForCounter | Noop => {} - } - } else { - error!("FAILED TO DESERIALIZE MSG {:?}", m); - } - } - DirectRequest(m, _, chan) => { - if let Ok(msg) = bincode::deserialize::(&m) { - match msg { - // direct message request - IncrementCounter { from, to, .. 
} => { - handle - .state - .modify(|s| { - if *s == from { - *s = to; - } - }) - .await; - handle - .handle - .direct_response( - chan, - &bincode::serialize(&CounterMessage::Noop).unwrap(), - ) - .await?; - } - // direct message response - AskForCounter => { - let response = MyCounterIs(handle.state.copied().await); - handle - .handle - .direct_response(chan, &bincode::serialize(&response).unwrap()) - .await?; - } - MyCounterIs(_) => { - handle - .handle - .direct_response( - chan, - &bincode::serialize(&CounterMessage::Noop).unwrap(), - ) - .await?; - } - Noop => { - handle - .handle - .direct_response( - chan, - &bincode::serialize(&CounterMessage::Noop).unwrap(), - ) - .await?; - } - } - } - } - }; - Ok(()) -} - -/// `requester_handle` asks for `requestee_handle`'s state, -/// and then `requester_handle` updates its state to equal `requestee_handle`. -/// # Panics -/// on error -#[allow(clippy::similar_names)] -async fn run_request_response_increment<'a, K: SignatureKey + 'static>( - requester_handle: HandleWithState, - requestee_handle: HandleWithState, - timeout: Duration, -) -> Result<(), TestError> { - async move { - let new_state = requestee_handle.state.copied().await; - - // set up state change listener - #[cfg(async_executor_impl = "async-std")] - let mut stream = requester_handle.state.wait_timeout_until_with_trigger(timeout, move |state| *state == new_state); - #[cfg(async_executor_impl = "tokio")] - let mut stream = Box::pin( - requester_handle.state.wait_timeout_until_with_trigger(timeout, move |state| *state == new_state), - ); - #[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] - compile_error! 
{"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} - - let requestee_pid = requestee_handle.handle.peer_id(); - - match stream.next().await.unwrap() { - Ok(()) => {} - Err(e) => {error!("timed out waiting for {requestee_pid:?} to update state: {e}"); - std::process::exit(-1)}, - } - requester_handle.handle - .direct_request(requestee_pid, &bincode::serialize(&CounterMessage::AskForCounter).unwrap()) - .await - .map_err(|e| TestError::HandleError(format!("failed to send direct request: {e}")))?; - match stream.next().await.unwrap() { - Ok(()) => {} - Err(e) => {error!("timed out waiting for {requestee_pid:?} to update state: {e}"); - std::process::exit(-1)}, } - - let s1 = requester_handle.state.copied().await; - - // sanity check - if s1 == new_state { - Ok(()) - } else { - Err(TestError::InconsistentState { - id: requester_handle.handle.id(), - expected: new_state, - actual: s1, - }) - } - } - .await -} - -/// broadcasts `msg` from a randomly chosen handle -/// then asserts that all nodes match `new_state` -async fn run_gossip_round( - handles: &[HandleWithState], - msg: CounterMessage, - new_state: CounterState, - timeout_duration: Duration, -) -> Result<(), TestError> { - let mut rng = rand::thread_rng(); - let msg_handle = random_handle(handles, &mut rng); - msg_handle.state.modify(|s| *s = new_state).await; - - let mut futs = Vec::new(); - - let len = handles.len(); - for handle in handles { - // already modified, so skip msg_handle - if handle.handle.peer_id() != msg_handle.handle.peer_id() { - let stream = handle - .state - .wait_timeout_until_with_trigger(timeout_duration, |state| *state == new_state); - futs.push(Box::pin(stream)); - } - } - - #[cfg(async_executor_impl = "async-std")] - let mut merged_streams = futures::stream::select_all(futs); - #[cfg(async_executor_impl = "tokio")] - let mut merged_streams = Box::pin(futures::stream::select_all(futs)); - #[cfg(not(any(async_executor_impl = "async-std", 
async_executor_impl = "tokio")))] - compile_error! {"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} - - // make sure all are ready/listening - for i in 0..len - 1 { - // unwrap is okay because stream must have 2 * (len - 1) elements - match merged_streams.next().await.unwrap() { - Ok(()) => {} - Err(e) => { - error!("timed out waiting for handle {i:?} to subscribe to state events: {e}"); - std::process::exit(-1) - } - } - } - - msg_handle - .handle - .gossip("global".to_string(), &bincode::serialize(&msg).unwrap()) - .await - .map_err(|e| TestError::HandleError(format!("failed to gossip: {e}")))?; - - for _ in 0..len - 1 { - // wait for all events to finish - // then check for failures - let _ = merged_streams.next().await; - } - - let mut failing = Vec::new(); - for handle in handles { - let handle_state = handle.state.copied().await; - if handle_state != new_state { - failing.push(handle.handle.id()); - println!("state: {handle_state:?}, expected: {new_state:?}"); - } - } - if !failing.is_empty() { - let nodes = handles - .iter() - .cloned() - .map(|h| h.handle) - .collect::>(); - print_connections(nodes.as_slice()).await; - return Err(TestError::Timeout(failing, "gossiping".to_string())); - } - - Ok(()) -} - -async fn run_intersperse_many_rounds( - handles: Vec>, - timeout: Duration, -) { - for i in 0..u32::try_from(NUM_ROUNDS).unwrap() { - if i % 2 == 0 { - run_request_response_increment_all(&handles, timeout).await; - } else { - run_gossip_rounds(&handles, 1, i, timeout).await; - } - } - for h in handles { - assert_eq!(h.state.copied().await, u32::try_from(NUM_ROUNDS).unwrap()); - } -} - -async fn run_dht_many_rounds( - handles: Vec>, - timeout: Duration, -) { - run_dht_rounds(&handles, timeout, 0, NUM_ROUNDS).await; -} - -async fn run_dht_one_round( - handles: Vec>, - timeout: Duration, -) { - run_dht_rounds(&handles, timeout, 0, 1).await; -} - -async fn run_request_response_many_rounds( - handles: Vec>, - timeout: Duration, 
-) { - for _i in 0..NUM_ROUNDS { - run_request_response_increment_all(&handles, timeout).await; - } - for h in handles { - assert_eq!(h.state.copied().await, u32::try_from(NUM_ROUNDS).unwrap()); - } -} - -/// runs one round of request response -/// # Panics -/// on error -async fn run_request_response_one_round( - handles: Vec>, - timeout: Duration, -) { - run_request_response_increment_all(&handles, timeout).await; - for h in handles { - assert_eq!(h.state.copied().await, 1); - } -} - -/// runs multiple rounds of gossip -/// # Panics -/// on error -async fn run_gossip_many_rounds( - handles: Vec>, - timeout: Duration, -) { - run_gossip_rounds(&handles, NUM_ROUNDS, 0, timeout).await; -} - -/// runs one round of gossip -/// # Panics -/// on error -async fn run_gossip_one_round( - handles: Vec>, - timeout: Duration, -) { - run_gossip_rounds(&handles, 1, 0, timeout).await; -} - -/// runs many rounds of dht -/// # Panics -/// on error -async fn run_dht_rounds( - handles: &[HandleWithState], - timeout: Duration, - _starting_val: usize, - num_rounds: usize, -) { - let mut rng = rand::thread_rng(); - for i in 0..num_rounds { - debug!("begin round {}", i); - let msg_handle = random_handle(handles, &mut rng); - - // Create a random keypair - let mut rng = StdRng::from_entropy(); - let (public_key, private_key) = K::generated_from_seed_indexed([1; 32], rng.gen::()); - - // Create a random value to sign - let value = (0..DHT_KV_PADDING) - .map(|_| rng.gen::()) - .collect::>(); - - // Create the record key - let key = RecordKey::new(Namespace::Lookup, public_key.to_bytes().clone()); - - // Sign the value - let value = RecordValue::new_signed(&key, value, &private_key).expect("signing failed"); - - // Put the key - msg_handle - .handle - .put_record(key.clone(), value.clone()) - .await - .unwrap(); - - // get the key from the other nodes - for handle in handles { - let result: Result, NetworkError> = - handle.handle.get_record_timeout(key.clone(), timeout).await; - match result 
{ - Err(e) => { - error!("DHT error {e:?} during GET"); - std::process::exit(-1); - } - Ok(v) => { - assert_eq!(v, value.value()); - } - } - } - } -} - -/// runs `num_rounds` of message broadcast, incrementing the state of all nodes each broadcast -async fn run_gossip_rounds( - handles: &[HandleWithState], - num_rounds: usize, - starting_state: CounterState, - timeout: Duration, -) { - let mut old_state = starting_state; - for i in 0..num_rounds { - info!("running gossip round {}", i); - let new_state = old_state + 1; - let msg = CounterMessage::IncrementCounter { - from: old_state, - to: new_state, - }; - run_gossip_round(handles, msg, new_state, timeout) - .await - .unwrap(); - old_state = new_state; - } -} - -/// chooses a random handle from `handles` -/// increments its state by 1, -/// then has all other peers request its state -/// and update their state to the recv'ed state -#[allow(clippy::similar_names)] -async fn run_request_response_increment_all( - handles: &[HandleWithState], - timeout: Duration, -) { - let mut rng = rand::thread_rng(); - let requestee_handle = random_handle(handles, &mut rng); - requestee_handle.state.modify(|s| *s += 1).await; - info!("RR REQUESTEE IS {:?}", requestee_handle.handle.peer_id()); - let mut futs = Vec::new(); - for handle in handles { - if handle - .handle - .lookup_pid(requestee_handle.handle.peer_id()) - .await - .is_err() - { - error!("ERROR LOOKING UP REQUESTEE ADDRS"); - } - // NOTE uncomment if debugging - // let _ = h.print_routing_table().await; - // skip `requestee_handle` - if handle.handle.peer_id() != requestee_handle.handle.peer_id() { - let requester_handle = handle.clone(); - futs.push(run_request_response_increment( - requester_handle, - requestee_handle.clone(), - timeout, - )); - } - } - - // NOTE this was originally join_all - // but this is simpler. 
- let results = Arc::new(RwLock::new(vec![])); - - let len = futs.len(); - - for _ in 0..futs.len() { - let fut = futs.pop().unwrap(); - let results = Arc::clone(&results); - async_spawn(async move { - let res = fut.await; - results.write().await.push(res); - }); - } - loop { - let l = results.read().await.iter().len(); - if l >= len { - break; - } - info!("NUMBER OF RESULTS for increment all is: {}", l); - async_sleep(Duration::from_secs(1)).await; - } - - if results.read().await.iter().any(Result::is_err) { - let nodes = handles - .iter() - .cloned() - .map(|h| h.handle) - .collect::>(); - print_connections(nodes.as_slice()).await; - let mut states = vec![]; - for handle in handles { - states.push(handle.state.copied().await); - } - error!("states: {states:?}"); - std::process::exit(-1); - } -} - -/// simple case of direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_request_response_one_round() { - Box::pin(test_bed( - run_request_response_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// stress test of direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_request_response_many_rounds() { - Box::pin(test_bed( - run_request_response_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// stress test of broadcast + direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_intersperse_many_rounds() { - Box::pin(test_bed( - run_intersperse_many_rounds::, - counter_handle_network_event, - 
TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// stress teset that we can broadcast a message out and get counter increments -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_gossip_many_rounds() { - Box::pin(test_bed( - run_gossip_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// simple case of broadcast message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_gossip_one_round() { - Box::pin(test_bed( - run_gossip_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// simple case of direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_request_response_one_round() { - Box::pin(test_bed( - run_request_response_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// stress test of direct messsage -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_request_response_many_rounds() { - Box::pin(test_bed( - run_request_response_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// stress test of broadcast + direct message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_intersperse_many_rounds() { 
- Box::pin(test_bed( - run_intersperse_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// stress teset that we can broadcast a message out and get counter increments -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_gossip_many_rounds() { - Box::pin(test_bed( - run_gossip_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// simple case of broadcast message -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_gossip_one_round() { - Box::pin(test_bed( - run_gossip_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// simple case of one dht publish event -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_dht_one_round() { - Box::pin(test_bed( - run_dht_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// many dht publishing events -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -#[ignore] -async fn test_stress_dht_many_rounds() { - Box::pin(test_bed( - run_dht_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_STRESS, - TIMEOUT_STRESS, - )) - .await; -} - -/// simple case of one dht publish event -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn 
test_coverage_dht_one_round() { - Box::pin(test_bed( - run_dht_one_round::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} - -/// many dht publishing events -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] -#[instrument] -async fn test_coverage_dht_many_rounds() { - Box::pin(test_bed( - run_dht_many_rounds::, - counter_handle_network_event, - TOTAL_NUM_PEERS_COVERAGE, - TIMEOUT_COVERAGE, - )) - .await; -} diff --git a/crates/macros/src/lib.rs b/crates/macros/src/lib.rs index e874468cda..e1920bf4fa 100644 --- a/crates/macros/src/lib.rs +++ b/crates/macros/src/lib.rs @@ -127,15 +127,11 @@ impl TestData { quote! { #[cfg(test)] #slow_attribute - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread") - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] #[tracing::instrument] async fn #test_name() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + hotshot_testing::test_builder::TestDescription::<#ty, #imply, #version>::gen_launcher((#metadata), 0).launch().run_test::<#builder_impl>().await; } } @@ -364,7 +360,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { use hotshot_testing::{predicates::{Predicate, PredicateResult}}; use async_broadcast::broadcast; use hotshot_task_impls::events::HotShotEvent; - use async_compatibility_layer::art::async_timeout; + use tokio::time::timeout; use hotshot_task::task::{Task, TaskState}; use hotshot_types::traits::node_implementation::NodeType; use std::sync::Arc; @@ -403,7 +399,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { let mut result = PredicateResult::Incomplete; - while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, 
from_task.recv_direct()).await { + while let Ok(Ok(received_output)) = timeout(#scripts.timeout, from_task.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); let output_asserts = &mut #task_expectations[stage_number].output_asserts; @@ -446,7 +442,7 @@ pub fn test_scripts(input: proc_macro::TokenStream) -> TokenStream { while from_test.try_recv().is_ok() {} let mut result = PredicateResult::Incomplete; - while let Ok(Ok(received_output)) = async_timeout(#scripts.timeout, from_task.recv_direct()).await { + while let Ok(Ok(received_output)) = timeout(#scripts.timeout, from_task.recv_direct()).await { tracing::debug!("Test received: {:?}", received_output); let output_asserts = &mut #task_expectations[stage_number].output_asserts; diff --git a/crates/orchestrator/Cargo.toml b/crates/orchestrator/Cargo.toml index 59bce1ae84..557bb0ed5b 100644 --- a/crates/orchestrator/Cargo.toml +++ b/crates/orchestrator/Cargo.toml @@ -4,7 +4,6 @@ version = { workspace = true } edition = { workspace = true } [dependencies] -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } clap.workspace = true futures = { workspace = true } @@ -17,18 +16,14 @@ tracing = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } toml = { workspace = true } -thiserror = "1" +thiserror = "2" csv = "1" vbs = { workspace = true } vec1 = { workspace = true } multiaddr = "0.18" anyhow.workspace = true bincode.workspace = true - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } [lints] workspace = true diff --git a/crates/orchestrator/README.md b/crates/orchestrator/README.md index 09df305c34..1bf5ade9c2 100644 --- a/crates/orchestrator/README.md +++ b/crates/orchestrator/README.md @@ -2,4 +2,4 @@ This crate implements an orchestrator that coordinates starting the network with a 
particular configuration. It is useful for testing and benchmarking. Like the web server, the orchestrator is built using [Tide Disco](https://github.com/EspressoSystems/tide-disco). -To run the orchestrator: `just async_std example orchestrator http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml` \ No newline at end of file +To run the orchestrator: `just example orchestrator http://0.0.0.0:3333 ./crates/orchestrator/run-config.toml` \ No newline at end of file diff --git a/crates/orchestrator/src/client.rs b/crates/orchestrator/src/client.rs index bac62ecdd7..53b097428a 100644 --- a/crates/orchestrator/src/client.rs +++ b/crates/orchestrator/src/client.rs @@ -6,7 +6,6 @@ use std::{net::SocketAddr, time::Duration}; -use async_compatibility_layer::art::async_sleep; use clap::Parser; use futures::{Future, FutureExt}; use hotshot_types::{ @@ -17,6 +16,7 @@ use hotshot_types::{ use libp2p::{Multiaddr, PeerId}; use surf_disco::{error::ClientError, Client}; use tide_disco::Url; +use tokio::time::sleep; use tracing::{info, instrument}; use vbs::BinarySerializer; @@ -168,14 +168,14 @@ pub struct MultiValidatorArgs { /// If we are unable to get the configuration from the orchestrator pub async fn get_complete_config( client: &OrchestratorClient, - my_own_validator_config: ValidatorConfig, + mut validator_config: ValidatorConfig, libp2p_advertise_address: Option, libp2p_public_key: Option, -) -> anyhow::Result<(NetworkConfig, NetworkConfigSource)> { +) -> anyhow::Result<(NetworkConfig, ValidatorConfig, NetworkConfigSource)> { // get the configuration from the orchestrator let run_config: NetworkConfig = client .post_and_wait_all_public_keys::( - my_own_validator_config, + &mut validator_config, libp2p_advertise_address, libp2p_public_key, ) @@ -183,9 +183,13 @@ pub async fn get_complete_config( info!( "Retrieved config; our node index is {}. 
DA committee member: {}", - run_config.node_index, run_config.config.my_own_validator_config.is_da + run_config.node_index, validator_config.is_da ); - Ok((run_config, NetworkConfigSource::Orchestrator)) + Ok(( + run_config, + validator_config, + NetworkConfigSource::Orchestrator, + )) } impl ValidatorArgs { @@ -393,7 +397,7 @@ impl OrchestratorClient { #[instrument(skip(self), name = "orchestrator public keys")] pub async fn post_and_wait_all_public_keys( &self, - mut validator_config: ValidatorConfig, + validator_config: &mut ValidatorConfig, libp2p_advertise_address: Option, libp2p_public_key: Option, ) -> NetworkConfig { @@ -423,7 +427,7 @@ impl OrchestratorClient { break (index, is_da); } - async_sleep(Duration::from_millis(250)).await; + sleep(Duration::from_millis(250)).await; }; validator_config.is_da = is_da; @@ -445,7 +449,6 @@ impl OrchestratorClient { let mut network_config = self.get_config_after_collection().await; network_config.node_index = node_index; - network_config.config.my_own_validator_config = validator_config; network_config } @@ -510,7 +513,7 @@ impl OrchestratorClient { Ok(x) => break x, Err(err) => { tracing::info!("{err}"); - async_sleep(Duration::from_millis(250)).await; + sleep(Duration::from_millis(250)).await; } } } diff --git a/crates/task-impls/Cargo.toml b/crates/task-impls/Cargo.toml index c9a67c0c6a..12e8b368a3 100644 --- a/crates/task-impls/Cargo.toml +++ b/crates/task-impls/Cargo.toml @@ -14,7 +14,6 @@ test-srs = ["jf-vid/test-srs"] [dependencies] anyhow = { workspace = true } async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } @@ -29,6 +28,7 @@ hotshot-types = { path = "../types" } hotshot-builder-api = { path = "../builder-api" } jf-signature = { workspace = true } jf-vid = { workspace = true } +lru.workspace = true rand = { workspace = true } serde = { workspace = true } sha2 = { 
workspace = true } @@ -41,11 +41,7 @@ url = { workspace = true } utils = { path = "../utils" } vbs = { workspace = true } vec1 = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } [lints] workspace = true diff --git a/crates/task-impls/src/builder.rs b/crates/task-impls/src/builder.rs index fba215217b..330f5200d6 100644 --- a/crates/task-impls/src/builder.rs +++ b/crates/task-impls/src/builder.rs @@ -6,7 +6,6 @@ use std::time::{Duration, Instant}; -use async_compatibility_layer::art::async_sleep; use hotshot_builder_api::v0_1::{ block_info::AvailableBlockInfo, builder::{BuildError, Error as BuilderApiError}, @@ -20,6 +19,7 @@ use serde::{Deserialize, Serialize}; use surf_disco::{client::HealthStatus, Client, Url}; use tagged_base64::TaggedBase64; use thiserror::Error; +use tokio::time::sleep; use vbs::version::StaticVersionType; #[derive(Debug, Error, Serialize, Deserialize)] @@ -54,6 +54,7 @@ impl From for BuilderClientError { BuildError::Missing => Self::BlockMissing, BuildError::Error(message) => Self::Api(message), }, + BuilderApiError::TxnStat(source) => Self::Api(source.to_string()), } } } @@ -96,7 +97,7 @@ impl BuilderClient { ) { return true; } - async_sleep(backoff).await; + sleep(backoff).await; backoff *= 2; } false @@ -185,6 +186,30 @@ pub mod v0_1 { .await .map_err(Into::into) } + + /// Claim block and provide the number of nodes information to the builder for VID + /// computation. 
+ /// + /// # Errors + /// - [`BuilderClientError::BlockNotFound`] if block isn't available + /// - [`BuilderClientError::Api`] if API isn't responding or responds incorrectly + pub async fn claim_block_with_num_nodes( + &self, + block_hash: BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + num_nodes: usize, + ) -> Result, BuilderClientError> { + let encoded_signature: TaggedBase64 = signature.clone().into(); + self.client + .get(&format!( + "{LEGACY_BUILDER_MODULE}/claimblockwithnumnodes/{block_hash}/{view_number}/{sender}/{encoded_signature}/{num_nodes}" + )) + .send() + .await + .map_err(Into::into) + } } } diff --git a/crates/task-impls/src/consensus/handlers.rs b/crates/task-impls/src/consensus/handlers.rs index c1edf60bc7..cfc399bd56 100644 --- a/crates/task-impls/src/consensus/handlers.rs +++ b/crates/task-impls/src/consensus/handlers.rs @@ -7,7 +7,6 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::Sender; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use chrono::Utc; use hotshot_types::{ event::{Event, EventType}, @@ -18,14 +17,13 @@ use hotshot_types::{ }, vote::HasViewNumber, }; +use tokio::{spawn, time::sleep}; use tracing::instrument; use utils::anytrace::*; use super::ConsensusTaskState; use crate::{ - consensus::Versions, - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, + consensus::Versions, events::HotShotEvent, helpers::broadcast_event, vote_collection::handle_vote, }; @@ -40,14 +38,19 @@ pub(crate) async fn handle_quorum_vote_recv< sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { - // Are we the leader for this view? + let is_vote_leaf_extended = task_state + .consensus + .read() + .await + .is_leaf_extended(vote.data.leaf_commit); + let we_are_leader = task_state + .quorum_membership + .leader(vote.view_number() + 1, task_state.cur_epoch)? 
+ == task_state.public_key; ensure!( - task_state - .quorum_membership - .leader(vote.view_number() + 1, task_state.cur_epoch)? - == task_state.public_key, + is_vote_leaf_extended || we_are_leader, info!( - "We are not the leader for view {:?}", + "We are not the leader for view {:?} and this is not the last vote for eQC", vote.view_number() + 1 ) ); @@ -62,6 +65,7 @@ pub(crate) async fn handle_quorum_vote_recv< &event, sender, &task_state.upgrade_lock, + !is_vote_leaf_extended, ) .await?; @@ -101,6 +105,7 @@ pub(crate) async fn handle_timeout_vote_recv< &event, sender, &task_state.upgrade_lock, + true, ) .await?; @@ -115,9 +120,15 @@ pub(crate) async fn handle_view_change< V: Versions, >( new_view_number: TYPES::View, + epoch_number: TYPES::Epoch, sender: &Sender>>, task_state: &mut ConsensusTaskState, ) -> Result<()> { + if epoch_number > task_state.cur_epoch { + task_state.cur_epoch = epoch_number; + tracing::info!("Progress: entered epoch {:>6}", *epoch_number); + } + ensure!( new_view_number > task_state.cur_view, "New view is not larger than the current view" @@ -126,8 +137,16 @@ pub(crate) async fn handle_view_change< let old_view_number = task_state.cur_view; tracing::debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); + if *old_view_number / 100 != *new_view_number / 100 { + tracing::info!("Progress: entered view {:>6}", *new_view_number); + } // Move this node to the next view task_state.cur_view = new_view_number; + task_state + .consensus + .write() + .await + .update_view(new_view_number)?; // If we have a decided upgrade certificate, the protocol version may also have been upgraded. 
let decided_upgrade_certificate_read = task_state @@ -147,13 +166,11 @@ pub(crate) async fn handle_view_change< // Spawn a timeout task if we did actually update view let timeout = task_state.timeout; - let new_timeout_task = async_spawn({ + let new_timeout_task = spawn({ let stream = sender.clone(); - // Nuance: We timeout on the view + 1 here because that means that we have - // not seen evidence to transition to this new view - let view_number = new_view_number + 1; + let view_number = new_view_number; async move { - async_sleep(Duration::from_millis(timeout)).await; + sleep(Duration::from_millis(timeout)).await; broadcast_event( Arc::new(HotShotEvent::Timeout(TYPES::View::new(*view_number))), &stream, @@ -163,11 +180,7 @@ pub(crate) async fn handle_view_change< }); // Cancel the old timeout task - cancel_task(std::mem::replace( - &mut task_state.timeout_task, - new_timeout_task, - )) - .await; + std::mem::replace(&mut task_state.timeout_task, new_timeout_task).abort(); let consensus_reader = task_state.consensus.read().await; consensus_reader @@ -191,14 +204,14 @@ pub(crate) async fn handle_view_change< // Do the comparison before the subtraction to avoid potential overflow, since // `last_decided_view` may be greater than `cur_view` if the node is catching up. 
if usize::try_from(task_state.cur_view.u64()).unwrap() - > usize::try_from(task_state.last_decided_view.u64()).unwrap() + > usize::try_from(consensus_reader.last_decided_view().u64()).unwrap() { consensus_reader .metrics .number_of_views_since_last_decide .set( usize::try_from(task_state.cur_view.u64()).unwrap() - - usize::try_from(task_state.last_decided_view.u64()).unwrap(), + - usize::try_from(consensus_reader.last_decided_view().u64()).unwrap(), ); } @@ -223,7 +236,7 @@ pub(crate) async fn handle_timeout task_state: &mut ConsensusTaskState, ) -> Result<()> { ensure!( - task_state.cur_view < view_number, + task_state.cur_view <= view_number, "Timeout event is for an old view" ); @@ -255,7 +268,7 @@ pub(crate) async fn handle_timeout ) .await; - tracing::debug!( + tracing::error!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view_number ); diff --git a/crates/task-impls/src/consensus/mod.rs b/crates/task-impls/src/consensus/mod.rs index edff8f6078..15d6bc6ec8 100644 --- a/crates/task-impls/src/consensus/mod.rs +++ b/crates/task-impls/src/consensus/mod.rs @@ -8,8 +8,6 @@ use std::sync::Arc; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ @@ -23,7 +21,6 @@ use hotshot_types::{ signature_key::SignatureKey, }, }; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::Result; @@ -90,18 +87,18 @@ pub struct ConsensusTaskState, V: /// A reference to the metrics trait. 
pub consensus: OuterConsensus, - /// The last decided view - pub last_decided_view: TYPES::View, - /// The node's id pub id: u64, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl, V: Versions> ConsensusTaskState { /// Handles a consensus event received on the event stream - #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, last_decided_view = *self.last_decided_view), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] + #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, cur_epoch = *self.cur_epoch), name = "Consensus replica task", level = "error", target = "ConsensusTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -122,8 +119,10 @@ impl, V: Versions> ConsensusTaskSt tracing::debug!("Failed to handle TimeoutVoteRecv event; error = {e}"); } } - HotShotEvent::ViewChange(new_view_number) => { - if let Err(e) = handle_view_change(*new_view_number, &sender, self).await { + HotShotEvent::ViewChange(new_view_number, epoch_number) => { + if let Err(e) = + handle_view_change(*new_view_number, *epoch_number, &sender, self).await + { tracing::trace!("Failed to handle ViewChange event; error = {e}"); } } @@ -132,21 +131,6 @@ impl, V: Versions> ConsensusTaskSt tracing::debug!("Failed to handle Timeout event; error = {e}"); } } - HotShotEvent::LastDecidedViewUpdated(view_number) => { - if *view_number < self.last_decided_view { - tracing::debug!("New decided view is not newer than ours"); - } else { - self.last_decided_view = *view_number; - if let Err(e) = self - .consensus - .write() - .await - .update_last_decided_view(*view_number) - { - tracing::trace!("{e:?}"); - } - } - } _ => {} } @@ -170,5 +154,8 @@ impl, V: Versions> TaskState } /// Joins all subtasks. 
- async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) { + // Cancel the old timeout task + std::mem::replace(&mut self.timeout_task, tokio::spawn(async {})).abort(); + } } diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 557895e95d..edaa5123e5 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -7,14 +7,11 @@ use std::{marker::PhantomData, sync::Arc}; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::spawn_blocking; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::{Consensus, OuterConsensus, View}, + consensus::{Consensus, OuterConsensus}, data::{DaProposal, PackedBundle}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, @@ -28,12 +25,10 @@ use hotshot_types::{ signature_key::SignatureKey, storage::Storage, }, - utils::ViewInner, vote::HasViewNumber, }; use sha2::{Digest, Sha256}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::spawn_blocking; +use tokio::{spawn, task::spawn_blocking}; use tracing::instrument; use utils::anytrace::*; @@ -89,7 +84,7 @@ pub struct DaTaskState, V: Version impl, V: Versions> DaTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error", target = "DaTaskState")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "DA Main Task", level = "error", target = "DaTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -183,21 +178,18 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState { - let view = *view; + HotShotEvent::ViewChange(view, epoch) => { + if *epoch > self.cur_epoch { + self.cur_epoch = *epoch; + } + let view = *view; ensure!( *self.cur_view < *view, 
info!("Received a view change to an older view.") @@ -309,15 +301,6 @@ impl, V: Versions> DaTaskState { let PackedBundle:: { @@ -380,5 +363,5 @@ impl, V: Versions> TaskState self.handle(event, sender.clone()).await } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } diff --git a/crates/task-impls/src/events.rs b/crates/task-impls/src/events.rs index 4aea9b5ec8..bca4b4a12b 100644 --- a/crates/task-impls/src/events.rs +++ b/crates/task-impls/src/events.rs @@ -28,7 +28,7 @@ use hotshot_types::{ block_contents::BuilderFee, network::DataRequest, node_implementation::NodeType, signature_key::SignatureKey, BlockPayload, }, - utils::{BuilderCommitment, View}, + utils::BuilderCommitment, vid::VidCommitment, vote::HasViewNumber, }; @@ -92,8 +92,8 @@ pub enum HotShotEvent { QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), - /// All dependencies for the quorum vote are validated. - QuorumVoteDependenciesValidated(TYPES::View), + /// Broadcast a quorum vote to form an eQC; emitted by a replica in the consensus task after seeing a valid quorum proposal + ExtendedQuorumVoteSend(QuorumVote), /// A quorum proposal with the given parent leaf is validated. /// The full validation checks include: /// 1. 
The proposal is not for an old view @@ -124,7 +124,7 @@ pub enum HotShotEvent { /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks - ViewChange(TYPES::View), + ViewChange(TYPES::View, TYPES::Epoch), /// Timeout for the view sync protocol; emitted by a replica in the view sync task ViewSyncTimeout(TYPES::View, u64, ViewSyncPhase), @@ -175,10 +175,7 @@ pub enum HotShotEvent { ), /// Event when the transactions task has sequenced transactions. Contains the encoded transactions, the metadata, and the view number BlockRecv(PackedBundle), - /// Event when the transactions task has a block formed - BlockReady(VidDisperse, TYPES::View), - /// Event when consensus decided on a leaf - LeafDecided(Vec>), + /// Send VID shares to VID storage nodes; emitted by the DA leader /// /// Like [`HotShotEvent::DaProposalSend`]. @@ -203,22 +200,6 @@ pub enum HotShotEvent { /// Upgrade certificate has been sent to the network UpgradeCertificateFormed(UpgradeCertificate), - /* Consensus State Update Events */ - /// A undecided view has been created and added to the validated state storage. - ValidatedStateUpdated(TYPES::View, View), - - /// A new locked view has been created (2-chain) - LockedViewUpdated(TYPES::View), - - /// A new anchor view has been successfully reached by this node (3-chain). - LastDecidedViewUpdated(TYPES::View), - - /// A new high_qc has been reached by this node. - UpdateHighQc(QuorumCertificate), - - /// A new high_qc has been updated in `Consensus`. - HighQcUpdated(QuorumCertificate), - /// A quorum proposal has been preliminarily validated. /// The preliminary checks include: /// 1. 
The proposal is not for an old view @@ -274,7 +255,9 @@ impl HotShotEvent { | HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { Some(proposal.data.view_number()) } - HotShotEvent::QuorumVoteSend(vote) => Some(vote.view_number()), + HotShotEvent::QuorumVoteSend(vote) | HotShotEvent::ExtendedQuorumVoteSend(vote) => { + Some(vote.view_number()) + } HotShotEvent::DaProposalRecv(proposal, _) | HotShotEvent::DaProposalValidated(proposal, _) | HotShotEvent::DaProposalSend(proposal, _) => Some(proposal.data.view_number()), @@ -303,7 +286,6 @@ impl HotShotEvent { HotShotEvent::BlockRecv(packed_bundle) => Some(packed_bundle.view_number), HotShotEvent::Shutdown | HotShotEvent::TransactionSend(_, _) - | HotShotEvent::LeafDecided(_) | HotShotEvent::TransactionsRecv(_) => None, HotShotEvent::VidDisperseSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::VidShareRecv(_, proposal) | HotShotEvent::VidShareValidated(proposal) => { @@ -316,21 +298,13 @@ impl HotShotEvent { } HotShotEvent::QuorumProposalRequestSend(req, _) | HotShotEvent::QuorumProposalRequestRecv(req, _) => Some(req.view_number), - HotShotEvent::QuorumVoteDependenciesValidated(view_number) - | HotShotEvent::ViewChange(view_number) + HotShotEvent::ViewChange(view_number, _) | HotShotEvent::ViewSyncTimeout(view_number, _, _) | HotShotEvent::ViewSyncTrigger(view_number) - | HotShotEvent::Timeout(view_number) - | HotShotEvent::BlockReady(_, view_number) - | HotShotEvent::LockedViewUpdated(view_number) - | HotShotEvent::LastDecidedViewUpdated(view_number) - | HotShotEvent::ValidatedStateUpdated(view_number, _) => Some(*view_number), + | HotShotEvent::Timeout(view_number) => Some(*view_number), HotShotEvent::DaCertificateRecv(cert) | HotShotEvent::DacSend(cert, _) => { Some(cert.view_number()) } - HotShotEvent::UpdateHighQc(cert) | HotShotEvent::HighQcUpdated(cert) => { - Some(cert.view_number()) - } HotShotEvent::DaCertificateValidated(cert) => Some(cert.view_number), 
HotShotEvent::UpgradeCertificateFormed(cert) => Some(cert.view_number()), HotShotEvent::VidRequestSend(request, _, _) @@ -354,6 +328,13 @@ impl Display for HotShotEvent { HotShotEvent::QuorumVoteRecv(v) => { write!(f, "QuorumVoteRecv(view_number={:?})", v.view_number()) } + HotShotEvent::ExtendedQuorumVoteSend(v) => { + write!( + f, + "ExtendedQuorumVoteSend(view_number={:?})", + v.view_number() + ) + } HotShotEvent::TimeoutVoteRecv(v) => { write!(f, "TimeoutVoteRecv(view_number={:?})", v.view_number()) } @@ -389,12 +370,6 @@ impl Display for HotShotEvent { HotShotEvent::QuorumVoteSend(vote) => { write!(f, "QuorumVoteSend(view_number={:?})", vote.view_number()) } - HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { - write!( - f, - "QuorumVoteDependenciesValidated(view_number={view_number:?})" - ) - } HotShotEvent::QuorumProposalValidated(proposal, _) => write!( f, "QuorumProposalValidated(view_number={:?})", @@ -415,8 +390,11 @@ impl Display for HotShotEvent { HotShotEvent::DacSend(cert, _) => { write!(f, "DacSend(view_number={:?})", cert.view_number()) } - HotShotEvent::ViewChange(view_number) => { - write!(f, "ViewChange(view_number={view_number:?})") + HotShotEvent::ViewChange(view_number, epoch_number) => { + write!( + f, + "ViewChange(view_number={view_number:?}, epoch_number={epoch_number:?})" + ) } HotShotEvent::ViewSyncTimeout(view_number, _, _) => { write!(f, "ViewSyncTimeout(view_number={view_number:?})") @@ -508,14 +486,6 @@ impl Display for HotShotEvent { HotShotEvent::BlockRecv(packed_bundle) => { write!(f, "BlockRecv(view_number={:?})", packed_bundle.view_number) } - HotShotEvent::BlockReady(_, view_number) => { - write!(f, "BlockReady(view_number={view_number:?})") - } - HotShotEvent::LeafDecided(leaves) => { - let view_numbers: Vec<::View> = - leaves.iter().map(Leaf::view_number).collect(); - write!(f, "LeafDecided({view_numbers:?})") - } HotShotEvent::VidDisperseSend(proposal, _) => write!( f, "VidDisperseSend(view_number={:?})", @@ 
-572,21 +542,6 @@ impl Display for HotShotEvent { proposal.data.view_number ) } - HotShotEvent::ValidatedStateUpdated(view_number, _) => { - write!(f, "ValidatedStateUpdated(view_number={view_number:?})") - } - HotShotEvent::LockedViewUpdated(view_number) => { - write!(f, "LockedViewUpdated(view_number={view_number:?})") - } - HotShotEvent::LastDecidedViewUpdated(view_number) => { - write!(f, "LastDecidedViewUpdated(view_number={view_number:?})") - } - HotShotEvent::UpdateHighQc(cert) => { - write!(f, "UpdateHighQc(view_number={:?})", cert.view_number()) - } - HotShotEvent::HighQcUpdated(cert) => { - write!(f, "HighQcUpdated(view_number={:?})", cert.view_number()) - } HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { write!( f, diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index b013b6dc5d..658708a071 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -7,9 +7,9 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::broadcast; -use async_compatibility_layer::art::async_timeout; use hotshot_task::task::{ConsensusTaskRegistry, Task, TaskState}; use hotshot_types::traits::node_implementation::NodeType; +use tokio::time::timeout; use crate::events::{HotShotEvent, HotShotTaskCompleted}; @@ -27,7 +27,7 @@ pub struct TestHarnessState { /// # Arguments /// * `event_stream` - if given, will be used to register the task builder. /// * `allow_extra_output` - whether to allow an extra output after we've seen all expected -/// outputs. Should be `false` in most cases. +/// outputs. Should be `false` in most cases. /// /// # Panics /// Panics if any state the test expects is not set. 
Panicking causes a test failure @@ -69,12 +69,10 @@ pub async fn run_harness> + Send to_task.broadcast_direct(Arc::new(event)).await.unwrap(); } - if async_timeout(Duration::from_secs(2), test_future) - .await - .is_err() - { - panic!("Test timeout out before all all expected outputs received"); - } + assert!( + timeout(Duration::from_secs(2), test_future).await.is_ok(), + "Test timeout out before all all expected outputs received" + ); } /// Handles an event for the Test Harness Task. If the event is expected, remove it from @@ -82,7 +80,7 @@ pub async fn run_harness> + Send /// /// # Arguments /// * `allow_extra_output` - whether to allow an extra output after we've seen all expected -/// outputs. Should be `false` in most cases. +/// outputs. Should be `false` in most cases. /// /// # Panics /// Will panic to fail the test when it receives and unexpected event diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index 5da3d831e7..cce95b6be3 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -4,22 +4,18 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
-use core::time::Duration; use std::{ collections::{HashMap, HashSet}, sync::Arc, }; use async_broadcast::{InactiveReceiver, Receiver, SendError, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; -use chrono::Utc; use committable::{Commitment, Committable}; use hotshot_task::dependency::{Dependency, EventDependency}; +use hotshot_types::utils::epoch_from_block_number; use hotshot_types::{ - consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus}, + consensus::OuterConsensus, data::{Leaf, QuorumProposal, ViewChangeEvidence}, event::{Event, EventType, LeafInfo}, message::{Proposal, UpgradeLock}, @@ -28,22 +24,18 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, election::Membership, - node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, + node_implementation::{NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, BlockPayload, ValidatedState, }, utils::{Terminator, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; +use tokio::time::timeout; use tracing::instrument; use utils::anytrace::*; -use crate::{ - events::HotShotEvent, quorum_proposal_recv::QuorumProposalRecvTaskState, - request::REQUEST_TIMEOUT, -}; +use crate::{events::HotShotEvent, quorum_proposal_recv::ValidationInfo, request::REQUEST_TIMEOUT}; /// Trigger a request to the network for a proposal for a view and wait for the response or timeout. #[instrument(skip_all)] @@ -57,7 +49,7 @@ pub(crate) async fn fetch_proposal( sender_public_key: TYPES::SignatureKey, sender_private_key: ::PrivateKey, upgrade_lock: &UpgradeLock, -) -> Result> { +) -> Result<(Leaf, View)> { // We need to be able to sign this request before submitting it to the network. Compute the // payload first. 
let signed_proposal_request = ProposalRequestPayload { @@ -85,7 +77,7 @@ pub(crate) async fn fetch_proposal( // Make a background task to await the arrival of the event data. let Ok(Some(proposal)) = // We want to explicitly timeout here so we aren't waiting around for the data. - async_timeout(REQUEST_TIMEOUT, async move { + timeout(REQUEST_TIMEOUT, async move { // We want to iterate until the proposal is not None, or until we reach the timeout. let mut proposal = None; while proposal.is_none() { @@ -123,7 +115,6 @@ pub(crate) async fn fetch_proposal( return None; } } - proposal }) .await @@ -146,6 +137,12 @@ pub(crate) async fn fetch_proposal( >::from_header(&proposal.data.block_header), ); + if let Err(e) = consensus_writer + .update_leaf(leaf.clone(), Arc::clone(&state), None, upgrade_lock) + .await + { + tracing::trace!("{e:?}"); + } let view = View { view_inner: ViewInner::Leaf { leaf: leaf.commit(upgrade_lock).await, @@ -153,20 +150,7 @@ pub(crate) async fn fetch_proposal( delta: None, }, }; - if let Err(e) = consensus_writer.update_validated_state_map(view_number, view.clone()) { - tracing::trace!("{e:?}"); - } - - consensus_writer - .update_saved_leaves(leaf.clone(), upgrade_lock) - .await; - - broadcast_event( - HotShotEvent::ValidatedStateUpdated(view_number, view).into(), - &event_sender, - ) - .await; - Ok(leaf) + Ok((leaf, view)) } /// Helper type to give names and to the output values of the leaf chain traversal operation. @@ -184,9 +168,6 @@ pub struct LeafChainTraversalOutcome { /// The decided leaves with corresponding validated state and VID info. pub leaf_views: Vec>, - /// The decided leaves. - pub leaves_decided: Vec>, - /// The transactions in the block payload for each leaf. 
pub included_txns: Option::Transaction>>>, @@ -205,7 +186,6 @@ impl Default for LeafChainTraversalOutcome { new_decided_view_number: None, new_decide_qc: None, leaf_views: Vec::new(), - leaves_decided: Vec::new(), included_txns: None, decided_upgrade_cert: None, } @@ -340,7 +320,6 @@ pub async fn decide_from_proposal( delta.clone(), vid_share, )); - res.leaves_decided.push(leaf.clone()); if let Some(ref payload) = leaf.block_payload() { res.included_txns = Some( payload @@ -371,6 +350,7 @@ pub(crate) async fn parent_leaf_and_state( private_key: ::PrivateKey, consensus: OuterConsensus, upgrade_lock: &UpgradeLock, + parent_view_number: TYPES::View, ) -> Result<(Leaf, Arc<::ValidatedState>)> { let consensus_reader = consensus.read().await; let cur_epoch = consensus_reader.cur_epoch(); @@ -381,7 +361,6 @@ pub(crate) async fn parent_leaf_and_state( next_proposal_view_number ) ); - let parent_view_number = consensus_reader.high_qc().view_number(); let vsm_contains_parent_view = consensus_reader .validated_state_map() .contains_key(&parent_view_number); @@ -403,7 +382,7 @@ pub(crate) async fn parent_leaf_and_state( } let consensus_reader = consensus.read().await; - let parent_view_number = consensus_reader.high_qc().view_number(); + //let parent_view_number = consensus_reader.high_qc().view_number(); let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( debug!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) )?; @@ -436,7 +415,7 @@ pub(crate) async fn parent_leaf_and_state( /// # Errors /// If any validation or state update fails. 
#[allow(clippy::too_many_lines)] -#[instrument(skip_all, fields(id = task_state.id, view = *proposal.data.view_number()))] +#[instrument(skip_all, fields(id = validation_info.id, view = *proposal.data.view_number()))] pub async fn validate_proposal_safety_and_liveness< TYPES: NodeType, I: NodeImplementation, @@ -444,7 +423,7 @@ pub async fn validate_proposal_safety_and_liveness< >( proposal: Proposal>, parent_leaf: Leaf, - task_state: &mut QuorumProposalRecvTaskState, + validation_info: &ValidationInfo, event_stream: Sender>>, sender: TYPES::SignatureKey, ) -> Result<()> { @@ -452,29 +431,28 @@ pub async fn validate_proposal_safety_and_liveness< let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); ensure!( - proposed_leaf.parent_commitment() == parent_leaf.commit(&task_state.upgrade_lock).await, + proposed_leaf.parent_commitment() + == parent_leaf.commit(&validation_info.upgrade_lock).await, "Proposed leaf does not extend the parent leaf." ); let state = Arc::new( >::from_header(&proposal.data.block_header), ); - let view = View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(&task_state.upgrade_lock).await, - state, - delta: None, // May be updated to `Some` in the vote task. - }, - }; { - let mut consensus_writer = task_state.consensus.write().await; - if let Err(e) = consensus_writer.update_validated_state_map(view_number, view.clone()) { + let mut consensus_writer = validation_info.consensus.write().await; + if let Err(e) = consensus_writer + .update_leaf( + proposed_leaf.clone(), + state, + None, + &validation_info.upgrade_lock, + ) + .await + { tracing::trace!("{e:?}"); } - consensus_writer - .update_saved_leaves(proposed_leaf.clone(), &task_state.upgrade_lock) - .await; // Update our internal storage of the proposal. The proposal is valid, so // we swallow this error and just log if it occurs. 
@@ -483,19 +461,12 @@ pub async fn validate_proposal_safety_and_liveness< }; } - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. - broadcast_event( - Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), - &event_stream, - ) - .await; - - let cur_epoch = task_state.cur_epoch; + let cur_epoch = validation_info.cur_epoch; UpgradeCertificate::validate( &proposal.data.upgrade_certificate, - &task_state.quorum_membership, + &validation_info.quorum_membership, cur_epoch, - &task_state.upgrade_lock, + &validation_info.upgrade_lock, ) .await?; @@ -503,7 +474,7 @@ pub async fn validate_proposal_safety_and_liveness< proposed_leaf .extends_upgrade( &parent_leaf, - &task_state.upgrade_lock.decided_upgrade_certificate, + &validation_info.upgrade_lock.decided_upgrade_certificate, ) .await?; @@ -511,9 +482,29 @@ pub async fn validate_proposal_safety_and_liveness< // Create a positive vote if either liveness or safety check // passes. - // Liveness check. { - let consensus_reader = task_state.consensus.read().await; + let consensus_reader = validation_info.consensus.read().await; + // Epoch safety check: + // The proposal is safe if + // 1. the proposed block and the justify QC block belong to the same epoch or + // 2. the justify QC is the eQC for the previous block + let proposal_epoch = + epoch_from_block_number(proposed_leaf.height(), validation_info.epoch_height); + let justify_qc_epoch = + epoch_from_block_number(parent_leaf.height(), validation_info.epoch_height); + ensure!( + proposal_epoch == justify_qc_epoch + || consensus_reader.check_eqc(&proposed_leaf, &parent_leaf), + { + error!( + "Failed epoch safety check \n Proposed leaf is {:?} \n justify QC leaf is {:?}", + proposed_leaf.clone(), + parent_leaf.clone(), + ) + } + ); + + // Liveness check. let liveness_check = justify_qc.view_number() > consensus_reader.locked_view(); // Safety check. 
@@ -537,7 +528,7 @@ pub async fn validate_proposal_safety_and_liveness< view_number, event: EventType::Error { error: Arc::new(e) }, }, - &task_state.output_event_stream, + &validation_info.output_event_stream, ) .await; } @@ -555,7 +546,7 @@ pub async fn validate_proposal_safety_and_liveness< sender, }, }, - &task_state.output_event_stream, + &validation_info.output_event_stream, ) .await; @@ -578,17 +569,17 @@ pub async fn validate_proposal_safety_and_liveness< /// /// # Errors /// If any validation or view number check fails. -pub async fn validate_proposal_view_and_certs< +pub(crate) async fn validate_proposal_view_and_certs< TYPES: NodeType, I: NodeImplementation, V: Versions, >( proposal: &Proposal>, - task_state: &mut QuorumProposalRecvTaskState, + validation_info: &ValidationInfo, ) -> Result<()> { let view_number = proposal.data.view_number(); ensure!( - view_number >= task_state.cur_view, + view_number >= validation_info.consensus.read().await.cur_view(), "Proposal is from an older view {:?}", proposal.data.clone() ); @@ -596,9 +587,9 @@ pub async fn validate_proposal_view_and_certs< // Validate the proposal's signature. 
This should also catch if the leaf_commitment does not equal our calculated parent commitment proposal .validate_signature( - &task_state.quorum_membership, - task_state.cur_epoch, - &task_state.upgrade_lock, + &validation_info.quorum_membership, + validation_info.cur_epoch, + &validation_info.upgrade_lock, ) .await?; @@ -620,9 +611,9 @@ pub async fn validate_proposal_view_and_certs< ensure!( timeout_cert .is_valid_cert( - task_state.timeout_membership.as_ref(), - task_state.cur_epoch, - &task_state.upgrade_lock + validation_info.quorum_membership.as_ref(), + validation_info.cur_epoch, + &validation_info.upgrade_lock ) .await, "Timeout certificate for view {} was invalid", @@ -641,9 +632,9 @@ pub async fn validate_proposal_view_and_certs< ensure!( view_sync_cert .is_valid_cert( - task_state.quorum_membership.as_ref(), - task_state.cur_epoch, - &task_state.upgrade_lock + validation_info.quorum_membership.as_ref(), + validation_info.cur_epoch, + &validation_info.upgrade_lock ) .await, "Invalid view sync finalize cert provided" @@ -656,129 +647,15 @@ pub async fn validate_proposal_view_and_certs< // Note that we don't do anything with the certificate directly if this passes; it eventually gets stored as part of the leaf if nothing goes wrong. UpgradeCertificate::validate( &proposal.data.upgrade_certificate, - &task_state.quorum_membership, - task_state.cur_epoch, - &task_state.upgrade_lock, + &validation_info.quorum_membership, + validation_info.cur_epoch, + &validation_info.upgrade_lock, ) .await?; Ok(()) } -/// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the -/// `timeout_task` which are updated during the operation of the function. -/// -/// # Errors -/// Returns an [`utils::anytrace::Error`] when the new view is not greater than the current view. 
-pub(crate) async fn update_view, V: Versions>( - new_view: TYPES::View, - event_stream: &Sender>>, - task_state: &mut QuorumProposalRecvTaskState, -) -> Result<()> { - ensure!( - new_view > task_state.cur_view, - "New view is not greater than our current view" - ); - - let is_old_view_leader = task_state - .quorum_membership - .leader(task_state.cur_view, task_state.cur_epoch)? - == task_state.public_key; - let old_view = task_state.cur_view; - - tracing::debug!("Updating view from {} to {}", *old_view, *new_view); - - if *old_view / 100 != *new_view / 100 { - tracing::info!("Progress: entered view {:>6}", *new_view); - } - - task_state.cur_view = new_view; - - // The next view is just the current view + 1 - let next_view = task_state.cur_view + 1; - - futures::join! { - broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream), - broadcast_event( - Event { - view_number: old_view, - event: EventType::ViewFinished { - view_number: old_view, - }, - }, - &task_state.output_event_stream, - ) - }; - - // Spawn a timeout task if we did actually update view - let new_timeout_task = async_spawn({ - let stream = event_stream.clone(); - // Nuance: We timeout on the view + 1 here because that means that we have - // not seen evidence to transition to this new view - let view_number = next_view; - let timeout = Duration::from_millis(task_state.timeout); - async move { - async_sleep(timeout).await; - broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::View::new(*view_number))), - &stream, - ) - .await; - } - }); - - // cancel the old timeout task - cancel_task(std::mem::replace( - &mut task_state.timeout_task, - new_timeout_task, - )) - .await; - - let consensus_reader = task_state.consensus.upgradable_read().await; - consensus_reader - .metrics - .current_view - .set(usize::try_from(task_state.cur_view.u64()).unwrap()); - let new_view_time = Utc::now().timestamp(); - if is_old_view_leader { - #[allow(clippy::cast_precision_loss)] - consensus_reader - 
.metrics - .view_duration_as_leader - .add_point((new_view_time - task_state.cur_view_time) as f64); - } - task_state.cur_view_time = new_view_time; - - // Do the comparison before the subtraction to avoid potential overflow, since - // `last_decided_view` may be greater than `cur_view` if the node is catching up. - if usize::try_from(task_state.cur_view.u64()).unwrap() - > usize::try_from(consensus_reader.last_decided_view().u64()).unwrap() - { - consensus_reader - .metrics - .number_of_views_since_last_decide - .set( - usize::try_from(task_state.cur_view.u64()).unwrap() - - usize::try_from(consensus_reader.last_decided_view().u64()).unwrap(), - ); - } - let mut consensus_writer = ConsensusUpgradableReadLockGuard::upgrade(consensus_reader).await; - if let Err(e) = consensus_writer.update_view(new_view) { - tracing::trace!("{e:?}"); - } - tracing::trace!("View updated successfully"); - - Ok(()) -} - -/// Cancel a task -pub async fn cancel_task(task: JoinHandle) { - #[cfg(async_executor_impl = "async-std")] - task.cancel().await; - #[cfg(async_executor_impl = "tokio")] - task.abort(); -} - /// Helper function to send events and log errors pub async fn broadcast_event(event: E, sender: &Sender) { match sender.broadcast_direct(event).await { diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index f9cbcff865..d623962424 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -4,15 +4,18 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
-use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{BTreeMap, HashMap}, + hash::{DefaultHasher, Hash, Hasher}, + sync::Arc, +}; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ - consensus::Consensus, + consensus::OuterConsensus, data::{VidDisperse, VidDisperseShare}, event::{Event, EventType, HotShotAction}, message::{ @@ -30,6 +33,7 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; +use tokio::{spawn, task::JoinHandle}; use tracing::instrument; use utils::anytrace::*; @@ -49,6 +53,9 @@ pub struct NetworkMessageTaskState { /// This nodes public key pub public_key: TYPES::SignatureKey, + + /// Transaction Cache to ignore previously seen transatctions + pub transactions_cache: lru::LruCache, } impl NetworkMessageTaskState { @@ -127,6 +134,11 @@ impl NetworkMessageTaskState { // Handle data messages MessageKind::Data(message) => match message { DataMessage::SubmitTransaction(transaction, _) => { + let mut hasher = DefaultHasher::new(); + transaction.hash(&mut hasher); + if self.transactions_cache.put(hasher.finish(), ()).is_some() { + return; + } broadcast_event( Arc::new(HotShotEvent::TransactionsRecv(vec![transaction])), &self.internal_event_stream, @@ -200,9 +212,11 @@ pub struct NetworkEventTaskState< /// Storage to store actionable events pub storage: Arc>, /// Shared consensus state - pub consensus: Arc>>, + pub consensus: OuterConsensus, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + /// map view number to transmit tasks + pub transmit_tasks: BTreeMap>>, } #[async_trait] @@ -226,7 +240,7 @@ impl< Ok(()) } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } impl< @@ -280,8 +294,8 @@ impl< let net = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); - let consensus = Arc::clone(&self.consensus); - async_spawn(async move { + 
let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); + spawn(async move { if NetworkEventTaskState::::maybe_record_action( Some(HotShotAction::VidDisperse), storage, @@ -306,7 +320,7 @@ impl< async fn maybe_record_action( maybe_action: Option, storage: Arc>, - consensus: Arc>>, + consensus: OuterConsensus, view: ::View, ) -> std::result::Result<(), ()> { if let Some(mut action) = maybe_action { @@ -331,6 +345,19 @@ impl< } } + /// Cancel all tasks for previous views + pub fn cancel_tasks(&mut self, view: TYPES::View) { + let keep = self.transmit_tasks.split_off(&view); + + while let Some((_, tasks)) = self.transmit_tasks.pop_first() { + for task in tasks { + task.abort(); + } + } + + self.transmit_tasks = keep; + } + /// Parses a `HotShotEvent` and returns a tuple of: (sender's public key, `MessageKind`, `TransmitType`) /// which will be used to create a message and transmit on the wire. /// Returns `None` if the parsing result should not be sent on the wire. 
@@ -381,6 +408,16 @@ impl< TransmitType::Direct(leader), )) } + HotShotEvent::ExtendedQuorumVoteSend(vote) => { + *maybe_action = Some(HotShotAction::Vote); + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Vote(vote.clone()), + )), + TransmitType::Broadcast, + )) + } HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( req.key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( @@ -582,15 +619,19 @@ impl< TransmitType::Direct(leader), )) } - HotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view, epoch) => { self.view = view; - self.network - .update_view::( - self.view.u64(), - self.epoch.u64(), - &self.quorum_membership, - ) - .await; + if epoch > self.epoch { + self.epoch = epoch; + } + self.cancel_tasks(view); + let net = Arc::clone(&self.network); + let epoch = self.epoch.u64(); + let mem = self.quorum_membership.clone(); + spawn(async move { + net.update_view::(view.saturating_sub(1), epoch, &mem) + .await; + }); None } HotShotEvent::VidRequestSend(req, sender, to) => Some(( @@ -614,7 +655,7 @@ impl< /// Creates a network message and spawns a task that transmits it on the wire. 
fn spawn_transmit_task( - &self, + &mut self, message_kind: MessageKind, maybe_action: Option, transmit: TransmitType, @@ -638,9 +679,9 @@ impl< .committee_members(view_number, self.epoch); let network = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); - let consensus = Arc::clone(&self.consensus); + let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); let upgrade_lock = self.upgrade_lock.clone(); - async_spawn(async move { + let handle = spawn(async move { if NetworkEventTaskState::::maybe_record_action( maybe_action, Arc::clone(&storage), @@ -694,6 +735,10 @@ impl< Err(e) => tracing::warn!("Failed to send message task: {:?}", e), } }); + self.transmit_tasks + .entry(view_number) + .or_default() + .push(handle); } } @@ -778,7 +823,7 @@ pub mod test { Ok(()) } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } impl< diff --git a/crates/task-impls/src/quorum_proposal/handlers.rs b/crates/task-impls/src/quorum_proposal/handlers.rs index ffad0f5d1e..8c375b4b54 100644 --- a/crates/task-impls/src/quorum_proposal/handlers.rs +++ b/crates/task-impls/src/quorum_proposal/handlers.rs @@ -9,13 +9,10 @@ use std::{marker::PhantomData, sync::Arc}; +use anyhow::{ensure, Context, Result}; use async_broadcast::{InactiveReceiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -use hotshot_task::{ - dependency::{Dependency, EventDependency}, - dependency_task::HandleDepOutput, -}; +use hotshot_task::dependency_task::HandleDepOutput; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, data::{Leaf, QuorumProposal, VidDisperse, ViewChangeEvidence}, @@ -24,6 +21,7 @@ use hotshot_types::{ traits::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, }, + vote::HasViewNumber, }; use tracing::instrument; use utils::anytrace::*; @@ -31,7 +29,7 @@ use vbs::version::StaticVersionType; use crate::{ events::HotShotEvent, - 
helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, + helpers::{broadcast_event, parent_leaf_and_state}, quorum_proposal::{UpgradeLock, Versions}, }; @@ -105,7 +103,7 @@ impl ProposalDependencyHandle { /// Publishes a proposal given the [`CommitmentAndMetadata`], [`VidDisperse`] /// and high qc [`hotshot_types::simple_certificate::QuorumCertificate`], /// with optional [`ViewChangeEvidence`]. - #[instrument(skip_all, target = "ProposalDependencyHandle", fields(id = self.id, view_number = *self.view_number, latest_proposed_view = *self.latest_proposed_view))] + #[instrument(skip_all, fields(id = self.id, view_number = *self.view_number, latest_proposed_view = *self.latest_proposed_view))] async fn publish_proposal( &self, commitment_and_metadata: CommitmentAndMetadata, @@ -113,6 +111,7 @@ impl ProposalDependencyHandle { view_change_evidence: Option>, formed_upgrade_certificate: Option>, decided_upgrade_certificate: Arc>>>, + parent_view_number: TYPES::View, ) -> Result<()> { let (parent_leaf, state) = parent_leaf_and_state( self.view_number, @@ -123,6 +122,7 @@ impl ProposalDependencyHandle { self.private_key.clone(), OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), &self.upgrade_lock, + parent_view_number, ) .await?; @@ -166,14 +166,29 @@ impl ProposalDependencyHandle { let version = self.upgrade_lock.version(self.view_number).await?; - let block_header = if version < V::Marketplace::VERSION { + let high_qc = self.consensus.read().await.high_qc().clone(); + + let builder_commitment = commitment_and_metadata.builder_commitment.clone(); + let metadata = commitment_and_metadata.metadata.clone(); + + let block_header = if version >= V::Epochs::VERSION + && self.consensus.read().await.is_high_qc_forming_eqc() + { + tracing::info!("Reached end of epoch. Proposing the same block again to form an eQC."); + let block_header = parent_leaf.block_header().clone(); + tracing::debug!( + "Proposing block no. 
{} to form the eQC.", + block_header.block_number() + ); + block_header + } else if version < V::Marketplace::VERSION { TYPES::BlockHeader::new_legacy( state.as_ref(), self.instance_state.as_ref(), &parent_leaf, commitment_and_metadata.commitment, - commitment_and_metadata.builder_commitment, - commitment_and_metadata.metadata, + builder_commitment, + metadata, commitment_and_metadata.fees.first().clone(), vid_share.data.common.clone(), version, @@ -190,6 +205,7 @@ impl ProposalDependencyHandle { commitment_and_metadata.builder_commitment, commitment_and_metadata.metadata, commitment_and_metadata.fees.to_vec(), + *self.view_number, vid_share.data.common.clone(), commitment_and_metadata.auction_result, version, @@ -202,7 +218,7 @@ impl ProposalDependencyHandle { let proposal = QuorumProposal { block_header, view_number: self.view_number, - justify_qc: self.consensus.read().await.high_qc().clone(), + justify_qc: high_qc, upgrade_certificate, proposal_certificate, }; @@ -242,61 +258,17 @@ impl ProposalDependencyHandle { Ok(()) } } + impl HandleDepOutput for ProposalDependencyHandle { type Output = Vec>>>>; #[allow(clippy::no_effect_underscore_binding, clippy::too_many_lines)] async fn handle_dep_result(self, res: Self::Output) { - let high_qc_view_number = self.consensus.read().await.high_qc().view_number; - let event_receiver = self.receiver.activate_cloned(); - if !self - .consensus - .read() - .await - .validated_state_map() - .contains_key(&high_qc_view_number) - { - // The proposal for the high qc view is missing, try to get it asynchronously - let membership = Arc::clone(&self.quorum_membership); - let event_sender = self.sender.clone(); - let sender_public_key = self.public_key.clone(); - let sender_private_key = self.private_key.clone(); - let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)); - let upgrade_lock = self.upgrade_lock.clone(); - let rx = event_receiver.clone(); - async_spawn(async move { - fetch_proposal( - 
high_qc_view_number, - event_sender, - rx, - membership, - consensus, - sender_public_key, - sender_private_key, - &upgrade_lock, - ) - .await - }); - // Block on receiving the event from the event stream. - EventDependency::new( - event_receiver, - Box::new(move |event| { - let event = event.as_ref(); - if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { - *view_number == high_qc_view_number - } else { - false - } - }), - ) - .completed() - .await; - } - let mut commit_and_metadata: Option> = None; let mut timeout_certificate = None; let mut view_sync_finalize_cert = None; let mut vid_share = None; + let mut parent_view_number = None; for event in res.iter().flatten().flatten() { match event.as_ref() { HotShotEvent::SendPayloadCommitmentAndMetadata( @@ -320,8 +292,8 @@ impl HandleDepOutput for ProposalDependencyHandle< either::Right(timeout) => { timeout_certificate = Some(timeout.clone()); } - either::Left(_) => { - // Handled by the UpdateHighQc event. + either::Left(qc) => { + parent_view_number = Some(qc.view_number()); } }, HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { @@ -334,6 +306,9 @@ impl HandleDepOutput for ProposalDependencyHandle< } } + let parent_view_number = + parent_view_number.unwrap_or(self.consensus.read().await.high_qc().view_number()); + if commit_and_metadata.is_none() { tracing::error!( "Somehow completed the proposal dependency task without a commitment and metadata" @@ -359,6 +334,7 @@ impl HandleDepOutput for ProposalDependencyHandle< proposal_cert, self.formed_upgrade_certificate.clone(), Arc::clone(&self.upgrade_lock.decided_upgrade_certificate), + parent_view_number, ) .await { diff --git a/crates/task-impls/src/quorum_proposal/mod.rs b/crates/task-impls/src/quorum_proposal/mod.rs index 5d33e16664..02f6ee23e8 100644 --- a/crates/task-impls/src/quorum_proposal/mod.rs +++ b/crates/task-impls/src/quorum_proposal/mod.rs @@ -8,11 +8,8 @@ use std::{collections::BTreeMap, sync::Arc}; use 
async_broadcast::{Receiver, Sender}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use either::Either; -use futures::future::join_all; use hotshot_task::{ dependency::{AndDependency, EventDependency, OrDependency}, dependency_task::DependencyTask, @@ -31,16 +28,12 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber}, }; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; use self::handlers::{ProposalDependency, ProposalDependencyHandle}; -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task}, -}; +use crate::events::HotShotEvent; mod handlers; @@ -95,6 +88,9 @@ pub struct QuorumProposalTaskState /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl, V: Versions> @@ -114,7 +110,7 @@ impl, V: Versions> let event = event.as_ref(); let event_view = match dependency_type { ProposalDependency::Qc => { - if let HotShotEvent::HighQcUpdated(qc) = event { + if let HotShotEvent::QcFormed(either::Left(qc)) = event { qc.view_number() + 1 } else { return false; @@ -233,7 +229,7 @@ impl, V: Versions> timeout_dependency.mark_as_completed(event); } Either::Left(_) => { - // qc_dependency.mark_as_completed(event); + qc_dependency.mark_as_completed(event); } }, HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) => { @@ -242,9 +238,6 @@ impl, V: Versions> HotShotEvent::VidDisperseSend(_, _) => { vid_share_dependency.mark_as_completed(event); } - HotShotEvent::HighQcUpdated(_) => { - qc_dependency.mark_as_completed(event); - } _ => {} }; @@ -350,7 +343,7 @@ impl, V: Versions> for view in (*self.latest_proposed_view + 1)..=(*new_view) { if let Some(dependency) = self.proposal_dependencies.remove(&TYPES::View::new(view)) { - cancel_task(dependency).await; + dependency.abort(); } } @@ -375,7 +368,6 @@ 
impl, V: Versions> "Upgrade certificate received for view {}!", *cert.view_number ); - // Update our current upgrade_cert as long as we still have a chance of reaching a decide on it in time. if cert.data.decide_by >= self.latest_proposed_view + 3 { tracing::debug!("Updating current formed_upgrade_certificate"); @@ -387,7 +379,6 @@ impl, V: Versions> either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; let epoch_number = self.consensus.read().await.cur_epoch(); - self.create_dependency_task_if_new( view_number, epoch_number, @@ -398,16 +389,37 @@ impl, V: Versions> } either::Left(qc) => { // Only update if the qc is from a newer view - let consensus_reader = self.consensus.read().await; - if qc.view_number <= consensus_reader.high_qc().view_number { + if qc.view_number <= self.consensus.read().await.high_qc().view_number { tracing::trace!( "Received a QC for a view that was not > than our current high QC" ); } - - // We need to gate on this data actually existing in the consensus shared state. - // So we broadcast here and handle *before* we make the task. - broadcast_event(HotShotEvent::UpdateHighQc(qc).into(), &event_sender).await; + self.consensus + .write() + .await + .update_high_qc(qc.clone()) + .wrap() + .context(error!( + "Failed to update high QC in internal consensus state!" 
+ ))?; + + // Then update the high QC in storage + self.storage + .write() + .await + .update_high_qc(qc.clone()) + .await + .wrap() + .context(error!("Failed to update high QC in storage!"))?; + let view_number = qc.view_number() + 1; + let epoch_number = self.consensus.read().await.cur_epoch(); + self.create_dependency_task_if_new( + view_number, + epoch_number, + event_receiver, + event_sender, + Arc::clone(&event), + )?; } }, HotShotEvent::SendPayloadCommitmentAndMetadata( @@ -458,7 +470,6 @@ impl, V: Versions> } HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { let view_number = proposal.data.view_number(); - // All nodes get the latest proposed view as a proxy of `cur_view` of old. if !self.update_latest_proposed_view(view_number).await { tracing::trace!("Failed to update latest proposed view"); @@ -484,7 +495,6 @@ impl, V: Versions> HotShotEvent::VidDisperseSend(vid_share, _) => { let view_number = vid_share.data.view_number(); let epoch_number = self.consensus.read().await.cur_epoch(); - self.create_dependency_task_if_new( view_number, epoch_number, @@ -493,59 +503,21 @@ impl, V: Versions> Arc::clone(&event), )?; } - HotShotEvent::UpdateHighQc(qc) => { - // First update the high QC internally - self.consensus - .write() - .await - .update_high_qc(qc.clone()) - .wrap() - .context(error!( - "Failed to update high QC in internal consensus state!" 
- ))?; - - // Then update the high QC in storage - self.storage - .write() - .await - .update_high_qc(qc.clone()) - .await - .wrap() - .context(error!("Failed to update high QC in storage!"))?; - - broadcast_event( - HotShotEvent::HighQcUpdated(qc.clone()).into(), - &event_sender, - ) - .await; - } - HotShotEvent::HighQcUpdated(qc) => { - let view_number = qc.view_number() + 1; - let epoch_number = self.consensus.read().await.cur_epoch(); - self.create_dependency_task_if_new( - view_number, - epoch_number, - event_receiver, - event_sender, - Arc::clone(&event), - )?; - } - HotShotEvent::ViewChange(view) | HotShotEvent::Timeout(view) => { - self.cancel_tasks(*view).await; + HotShotEvent::ViewChange(view, _) | HotShotEvent::Timeout(view) => { + self.cancel_tasks(*view); } _ => {} } Ok(()) } + /// Cancel all tasks the consensus tasks has spawned before the given view - pub async fn cancel_tasks(&mut self, view: TYPES::View) { + pub fn cancel_tasks(&mut self, view: TYPES::View) { let keep = self.proposal_dependencies.split_off(&view); - let mut cancel = Vec::new(); while let Some((_, task)) = self.proposal_dependencies.pop_first() { - cancel.push(cancel_task(task)); + task.abort(); } self.proposal_dependencies = keep; - join_all(cancel).await; } } @@ -564,11 +536,8 @@ impl, V: Versions> TaskState self.handle(event, receiver.clone(), sender.clone()).await } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { while let Some((_, handle)) = self.proposal_dependencies.pop_first() { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] handle.abort(); } } diff --git a/crates/task-impls/src/quorum_proposal_recv/handlers.rs b/crates/task-impls/src/quorum_proposal_recv/handlers.rs index 488854e95d..24ea89a3c2 100644 --- a/crates/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/crates/task-impls/src/quorum_proposal_recv/handlers.rs @@ -9,9 +9,9 @@ use std::sync::Arc; use 
async_broadcast::{broadcast, Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLockUpgradableReadGuard; use committable::Committable; +use hotshot_types::traits::block_contents::BlockHeader; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf, QuorumProposal}, @@ -19,22 +19,23 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, traits::{ election::Membership, - node_implementation::{NodeImplementation, NodeType}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, ValidatedState, }, - utils::{View, ViewInner}, + utils::{epoch_from_block_number, View, ViewInner}, vote::{Certificate, HasViewNumber}, }; +use tokio::spawn; use tracing::instrument; use utils::anytrace::*; -use super::QuorumProposalRecvTaskState; +use super::{QuorumProposalRecvTaskState, ValidationInfo}; use crate::{ events::HotShotEvent, helpers::{ - broadcast_event, fetch_proposal, update_view, validate_proposal_safety_and_liveness, + broadcast_event, fetch_proposal, validate_proposal_safety_and_liveness, validate_proposal_view_and_certs, }, quorum_proposal_recv::{UpgradeLock, Versions}, @@ -44,33 +45,24 @@ use crate::{ #[instrument(skip_all)] async fn validate_proposal_liveness, V: Versions>( proposal: &Proposal>, - event_sender: &Sender>>, - task_state: &mut QuorumProposalRecvTaskState, + validation_info: &ValidationInfo, ) -> Result<()> { - let view_number = proposal.data.view_number(); - let mut consensus_writer = task_state.consensus.write().await; + let mut consensus_writer = validation_info.consensus.write().await; let leaf = Leaf::from_quorum_proposal(&proposal.data); let state = Arc::new( >::from_header(&proposal.data.block_header), ); - let view = View { - view_inner: ViewInner::Leaf { - leaf: leaf.commit(&task_state.upgrade_lock).await, - state, - delta: None, // May be updated to `Some` in the vote task. 
- }, - }; - if let Err(e) = consensus_writer.update_validated_state_map(view_number, view.clone()) { + if let Err(e) = consensus_writer + .update_leaf(leaf.clone(), state, None, &validation_info.upgrade_lock) + .await + { tracing::trace!("{e:?}"); } - consensus_writer - .update_saved_leaves(leaf.clone(), &task_state.upgrade_lock) - .await; - if let Err(e) = task_state + if let Err(e) = validation_info .storage .write() .await @@ -88,17 +80,6 @@ async fn validate_proposal_liveness(view_number, event_sender, task_state).await { - tracing::debug!("Liveness Branch - Failed to update view; error = {e:#}"); - } - if !liveness_check { bail!("Quorum Proposal failed the liveness check"); } @@ -118,7 +99,7 @@ fn spawn_fetch_proposal( sender_private_key: ::PrivateKey, upgrade_lock: UpgradeLock, ) { - async_spawn(async move { + spawn(async move { let lock = upgrade_lock; let _ = fetch_proposal( @@ -153,11 +134,11 @@ pub(crate) async fn handle_quorum_proposal_recv< quorum_proposal_sender_key: &TYPES::SignatureKey, event_sender: &Sender>>, event_receiver: &Receiver>>, - task_state: &mut QuorumProposalRecvTaskState, + validation_info: ValidationInfo, ) -> Result<()> { let quorum_proposal_sender_key = quorum_proposal_sender_key.clone(); - validate_proposal_view_and_certs(proposal, task_state) + validate_proposal_view_and_certs(proposal, &validation_info) .await .context(warn!("Failed to validate proposal view or attached certs"))?; @@ -166,13 +147,13 @@ pub(crate) async fn handle_quorum_proposal_recv< if !justify_qc .is_valid_cert( - task_state.quorum_membership.as_ref(), - task_state.cur_epoch, - &task_state.upgrade_lock, + validation_info.quorum_membership.as_ref(), + validation_info.cur_epoch, + &validation_info.upgrade_lock, ) .await { - let consensus_reader = task_state.consensus.read().await; + let consensus_reader = validation_info.consensus.read().await; consensus_reader.metrics.invalid_qc.update(1); bail!("Invalid justify_qc in proposal for view {}", *view_number); } @@ 
-186,7 +167,7 @@ pub(crate) async fn handle_quorum_proposal_recv< .await; // Get the parent leaf and state. - let parent_leaf = task_state + let parent_leaf = validation_info .consensus .read() .await @@ -199,17 +180,17 @@ pub(crate) async fn handle_quorum_proposal_recv< justify_qc.view_number(), event_sender.clone(), event_receiver.clone(), - Arc::clone(&task_state.quorum_membership), - OuterConsensus::new(Arc::clone(&task_state.consensus.inner_consensus)), + Arc::clone(&validation_info.quorum_membership), + OuterConsensus::new(Arc::clone(&validation_info.consensus.inner_consensus)), // Note that we explicitly use the node key here instead of the provided key in the signature. // This is because the key that we receive is for the prior leader, so the payload would be routed // incorrectly. - task_state.public_key.clone(), - task_state.private_key.clone(), - task_state.upgrade_lock.clone(), + validation_info.public_key.clone(), + validation_info.private_key.clone(), + validation_info.upgrade_lock.clone(), ); } - let consensus_reader = task_state.consensus.read().await; + let consensus_reader = validation_info.consensus.read().await; let parent = match parent_leaf { Some(leaf) => { @@ -223,7 +204,7 @@ pub(crate) async fn handle_quorum_proposal_recv< }; if justify_qc.view_number() > consensus_reader.high_qc().view_number { - if let Err(e) = task_state + if let Err(e) = validation_info .storage .write() .await @@ -235,40 +216,61 @@ pub(crate) async fn handle_quorum_proposal_recv< } drop(consensus_reader); - let mut consensus_writer = task_state.consensus.write().await; + let mut consensus_writer = validation_info.consensus.write().await; if let Err(e) = consensus_writer.update_high_qc(justify_qc.clone()) { tracing::trace!("{e:?}"); } drop(consensus_writer); - broadcast_event( - HotShotEvent::HighQcUpdated(justify_qc.clone()).into(), - event_sender, - ) - .await; - let Some((parent_leaf, _parent_state)) = parent else { tracing::warn!( "Proposal's parent missing from 
storage with commitment: {:?}", justify_qc.data.leaf_commit ); - return validate_proposal_liveness(proposal, event_sender, task_state).await; + validate_proposal_liveness(proposal, &validation_info).await?; + let block_number = proposal.data.block_header.block_number(); + let epoch = TYPES::Epoch::new(epoch_from_block_number( + block_number, + validation_info.epoch_height, + )); + tracing::trace!( + "Sending ViewChange for view {} and epoch {}", + view_number, + *epoch + ); + broadcast_event( + Arc::new(HotShotEvent::ViewChange(view_number, epoch)), + event_sender, + ) + .await; + return Ok(()); }; // Validate the proposal validate_proposal_safety_and_liveness::( proposal.clone(), parent_leaf, - task_state, + &validation_info, event_sender.clone(), quorum_proposal_sender_key, ) .await?; - // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - if let Err(e) = update_view::(view_number, event_sender, task_state).await { - tracing::debug!("Full Branch - Failed to update view; error = {e:#}"); - } + let epoch_number = TYPES::Epoch::new(epoch_from_block_number( + proposal.data.block_header.block_number(), + validation_info.epoch_height, + )); + + tracing::trace!( + "Sending ViewChange for view {} and epoch {}", + view_number, + *epoch_number + ); + broadcast_event( + Arc::new(HotShotEvent::ViewChange(view_number, epoch_number)), + event_sender, + ) + .await; Ok(()) } diff --git a/crates/task-impls/src/quorum_proposal_recv/mod.rs b/crates/task-impls/src/quorum_proposal_recv/mod.rs index 030dc1295c..8f9d9e3f05 100644 --- a/crates/task-impls/src/quorum_proposal_recv/mod.rs +++ b/crates/task-impls/src/quorum_proposal_recv/mod.rs @@ -8,37 +8,33 @@ use std::{collections::BTreeMap, sync::Arc}; +use self::handlers::handle_quorum_proposal_recv; +use crate::{ + events::{HotShotEvent, ProposalMissing}, + helpers::{broadcast_event, fetch_proposal, parent_leaf_and_state}, +}; use async_broadcast::{broadcast, Receiver, Sender}; use 
async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; -use futures::future::join_all; +use either::Either; +use futures::future::{err, join_all}; use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, - data::{Leaf, ViewChangeEvidence}, + data::{EpochNumber, Leaf, ViewChangeEvidence}, event::Event, message::UpgradeLock, simple_certificate::UpgradeCertificate, traits::{ - node_implementation::{NodeImplementation, NodeType, Versions}, + node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, }, - vote::HasViewNumber, + vote::{Certificate, HasViewNumber}, }; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; use utils::anytrace::{bail, Result}; use vbs::version::Version; - -use self::handlers::handle_quorum_proposal_recv; -use crate::{ - events::{HotShotEvent, ProposalMissing}, - helpers::{broadcast_event, cancel_task, parent_leaf_and_state}, -}; - /// Event handlers for this task. mod handlers; @@ -57,24 +53,12 @@ pub struct QuorumProposalRecvTaskState, - /// Membership for Quorum Certs/votes pub quorum_membership: Arc, - /// Membership for Timeout votes/certs - pub timeout_membership: Arc, - - /// timeout task handle - pub timeout_task: JoinHandle<()>, - /// View timeout from config. pub timeout: u64, @@ -84,40 +68,61 @@ pub struct QuorumProposalRecvTaskState>, - /// last View Sync Certificate or Timeout Certificate this node formed. 
-    pub proposal_cert: Option>,
-
     /// Spawned tasks related to a specific view, so we can cancel them when
     /// they are stale
     pub spawned_tasks: BTreeMap>>,
 
-    /// Immutable instance state
-    pub instance_state: Arc,
-
     /// The node's id
     pub id: u64,
 
     /// Lock for a decided upgrade
     pub upgrade_lock: UpgradeLock,
+
+    /// Number of blocks in an epoch, zero means there are no epochs
+    pub epoch_height: u64,
+}
+
+/// All the info we need to validate a proposal. This makes it easy to spawn an ephemeral task to
+/// do all the proposal validation without blocking the long running one
+pub(crate) struct ValidationInfo, V: Versions> {
+    /// The node's id
+    pub id: u64,
+    /// Our public key
+    pub(crate) public_key: TYPES::SignatureKey,
+    /// Our Private Key
+    pub(crate) private_key: ::PrivateKey,
+    /// Epoch number this node is executing in.
+    pub cur_epoch: TYPES::Epoch,
+    /// Reference to consensus. The replica will require a write lock on this.
+    pub(crate) consensus: OuterConsensus,
+    /// Membership for Quorum Certs/votes
+    pub quorum_membership: Arc,
+    /// Output events to application
+    pub output_event_stream: async_broadcast::Sender>,
+    /// This node's storage ref
+    pub(crate) storage: Arc>,
+    /// Lock for a decided upgrade
+    pub(crate) upgrade_lock: UpgradeLock,
+    /// Number of blocks in an epoch, zero means there are no epochs
+    pub epoch_height: u64,
+}
 
 impl, V: Versions> QuorumProposalRecvTaskState {
     /// Cancel all tasks the consensus tasks has spawned before the given view
-    pub async fn cancel_tasks(&mut self, view: TYPES::View) {
+    pub fn cancel_tasks(&mut self, view: TYPES::View) {
         let keep = self.spawned_tasks.split_off(&view);
-        let mut cancel = Vec::new();
         while let Some((_, tasks)) = self.spawned_tasks.pop_first() {
-            let mut to_cancel = tasks.into_iter().map(cancel_task).collect();
-            cancel.append(&mut to_cancel);
+            for task in tasks {
+                task.abort();
+            }
         }
         self.spawned_tasks = keep;
-        join_all(cancel).await;
     }
 
     /// Handles all consensus events relating to 
propose and vote-enabling events. - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "Consensus replica task", level = "error")] #[allow(unused_variables)] pub async fn handle( &mut self, @@ -125,21 +130,55 @@ impl, V: Versions> event_sender: Sender>>, event_receiver: Receiver>>, ) { - if let HotShotEvent::QuorumProposalRecv(proposal, sender) = event.as_ref() { - match handle_quorum_proposal_recv( - proposal, - sender, - &event_sender, - &event_receiver, - self, - ) - .await - { - Ok(()) => { - self.cancel_tasks(proposal.data.view_number()).await; + match event.as_ref() { + HotShotEvent::QuorumProposalRecv(proposal, sender) => { + if self.consensus.read().await.cur_view() > proposal.data.view_number() + || self.cur_view > proposal.data.view_number() + { + tracing::error!("Throwing away old proposal"); + return; + } + let validation_info = ValidationInfo:: { + id: self.id, + public_key: self.public_key.clone(), + private_key: self.private_key.clone(), + cur_epoch: self.cur_epoch, + consensus: self.consensus.clone(), + quorum_membership: Arc::clone(&self.quorum_membership), + output_event_stream: self.output_event_stream.clone(), + storage: Arc::clone(&self.storage), + upgrade_lock: self.upgrade_lock.clone(), + epoch_height: self.epoch_height, + }; + match handle_quorum_proposal_recv( + proposal, + sender, + &event_sender, + &event_receiver, + validation_info, + ) + .await + { + Ok(()) => {} + Err(e) => debug!(?e, "Failed to validate the proposal"), + } + } + HotShotEvent::ViewChange(view, epoch) => { + if *epoch > self.cur_epoch { + self.cur_epoch = *epoch; + } + if self.cur_view >= *view { + return; } - Err(e) => debug!(?e, "Failed to validate the proposal"), + self.cur_view = *view; + // cancel task for any view 2 views prior or more. 
The view here is the oldest + // view we want to KEEP tasks for. We keep the view prior to this because + // we might still be processing the proposal from view V which caused us + // to enter view V + 1. + let oldest_view_to_keep = TYPES::View::new(view.saturating_sub(1)); + self.cancel_tasks(oldest_view_to_keep); } + _ => {} } } } @@ -161,15 +200,12 @@ impl, V: Versions> TaskState Ok(()) } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { while !self.spawned_tasks.is_empty() { let Some((_, handles)) = self.spawned_tasks.pop_first() else { break; }; for handle in handles { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] handle.abort(); } } diff --git a/crates/task-impls/src/quorum_vote/handlers.rs b/crates/task-impls/src/quorum_vote/handlers.rs index 656737524f..d4b9edc6c7 100644 --- a/crates/task-impls/src/quorum_vote/handlers.rs +++ b/crates/task-impls/src/quorum_vote/handlers.rs @@ -6,15 +6,21 @@ use std::sync::Arc; -use async_broadcast::Sender; +use async_broadcast::{InactiveReceiver, Sender}; +use async_lock::RwLock; use chrono::Utc; use hotshot_types::{ consensus::OuterConsensus, - data::QuorumProposal, + data::{Leaf, QuorumProposal, VidDisperseShare}, event::{Event, EventType}, + message::{Proposal, UpgradeLock}, + simple_vote::{QuorumData, QuorumVote}, traits::{ + election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + signature_key::SignatureKey, storage::Storage, + ValidatedState, }, vote::HasViewNumber, }; @@ -24,19 +30,18 @@ use utils::anytrace::*; use super::QuorumVoteTaskState; use crate::{ events::HotShotEvent, - helpers::{broadcast_event, decide_from_proposal, LeafChainTraversalOutcome}, + helpers::{broadcast_event, decide_from_proposal, fetch_proposal, LeafChainTraversalOutcome}, quorum_vote::Versions, }; /// Handles the `QuorumProposalValidated` event. 
-#[instrument(skip_all)] +#[instrument(skip_all, fields(id = task_state.id, view = *proposal.view_number))] pub(crate) async fn handle_quorum_proposal_validated< TYPES: NodeType, I: NodeImplementation, V: Versions, >( proposal: &QuorumProposal, - sender: &Sender>>, task_state: &mut QuorumVoteTaskState, ) -> Result<()> { let LeafChainTraversalOutcome { @@ -44,7 +49,6 @@ pub(crate) async fn handle_quorum_proposal_validated< new_decided_view_number, new_decide_qc, leaf_views, - leaves_decided, included_txns, decided_upgrade_cert, } = decide_from_proposal( @@ -74,13 +78,6 @@ pub(crate) async fn handle_quorum_proposal_validated< let mut consensus_writer = task_state.consensus.write().await; if let Some(locked_view_number) = new_locked_view_number { - // Broadcast the locked view update. - broadcast_event( - HotShotEvent::LockedViewUpdated(locked_view_number).into(), - sender, - ) - .await; - consensus_writer.update_locked_view(locked_view_number)?; } @@ -93,11 +90,6 @@ pub(crate) async fn handle_quorum_proposal_validated< // Set the new decided view. consensus_writer.update_last_decided_view(decided_view_number)?; - broadcast_event( - HotShotEvent::LastDecidedViewUpdated(decided_view_number).into(), - sender, - ) - .await; consensus_writer .metrics @@ -137,10 +129,205 @@ pub(crate) async fn handle_quorum_proposal_validated< &task_state.output_event_stream, ) .await; - - broadcast_event(Arc::new(HotShotEvent::LeafDecided(leaves_decided)), sender).await; tracing::debug!("Successfully sent decide event"); } Ok(()) } + +/// Updates the shared consensus state with the new voting data. 
+#[instrument(skip_all, target = "VoteDependencyHandle", fields(view = *view_number))] +#[allow(clippy::too_many_arguments)] +pub(crate) async fn update_shared_state< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + consensus: OuterConsensus, + sender: Sender>>, + receiver: InactiveReceiver>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + upgrade_lock: UpgradeLock, + view_number: TYPES::View, + instance_state: Arc, + storage: Arc>, + proposed_leaf: &Leaf, + vid_share: &Proposal>, + parent_view_number: Option, +) -> Result<()> { + let justify_qc = &proposed_leaf.justify_qc(); + + let consensus_reader = consensus.read().await; + // Try to find the validated vview within the validasted state map. This will be present + // if we have the saved leaf, but if not we'll get it when we fetch_proposal. + let mut maybe_validated_view = parent_view_number.and_then(|view_number| { + consensus_reader + .validated_state_map() + .get(&view_number) + .cloned() + }); + + // Justify qc's leaf commitment should be the same as the parent's leaf commitment. 
+ let mut maybe_parent = consensus_reader + .saved_leaves() + .get(&justify_qc.data.leaf_commit) + .cloned(); + + drop(consensus_reader); + + maybe_parent = match maybe_parent { + Some(p) => Some(p), + None => { + match fetch_proposal( + justify_qc.view_number(), + sender.clone(), + receiver.activate_cloned(), + Arc::clone(&quorum_membership), + OuterConsensus::new(Arc::clone(&consensus.inner_consensus)), + public_key.clone(), + private_key.clone(), + &upgrade_lock, + ) + .await + .ok() + { + Some((leaf, view)) => { + maybe_validated_view = Some(view); + Some(leaf) + } + None => None, + } + } + }; + + let parent = maybe_parent.context(info!( + "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", + justify_qc.data.leaf_commit, + proposed_leaf.view_number(), + ))?; + + let Some(validated_view) = maybe_validated_view else { + bail!( + "Failed to fetch view for parent, parent view {:?}", + parent_view_number + ); + }; + + let (Some(parent_state), _) = validated_view.state_and_delta() else { + bail!("Parent state not found! Consensus internally inconsistent"); + }; + + let version = upgrade_lock.version(view_number).await?; + + let (validated_state, state_delta) = parent_state + .validate_and_apply_header( + &instance_state, + &parent, + &proposed_leaf.block_header().clone(), + vid_share.data.common.clone(), + version, + *view_number, + ) + .await + .wrap() + .context(warn!("Block header doesn't extend the proposal!"))?; + + let state = Arc::new(validated_state); + let delta = Arc::new(state_delta); + + // Now that we've rounded everyone up, we need to update the shared state + let mut consensus_writer = consensus.write().await; + + if let Err(e) = consensus_writer + .update_leaf( + proposed_leaf.clone(), + Arc::clone(&state), + Some(Arc::clone(&delta)), + &upgrade_lock, + ) + .await + { + tracing::trace!("{e:?}"); + } + + // Kick back our updated structures for downstream usage. 
+ let new_leaves = consensus_writer.saved_leaves().clone(); + let new_state = consensus_writer.validated_state_map().clone(); + drop(consensus_writer); + + // Send the new state up to the sequencer. + storage + .write() + .await + .update_undecided_state(new_leaves, new_state) + .await + .wrap() + .context(error!("Failed to update undecided state"))?; + + Ok(()) +} + +/// Submits the `QuorumVoteSend` event if all the dependencies are met. +#[instrument(skip_all, fields(name = "Submit quorum vote", level = "error"))] +#[allow(clippy::too_many_arguments)] +pub(crate) async fn submit_vote, V: Versions>( + sender: Sender>>, + quorum_membership: Arc, + public_key: TYPES::SignatureKey, + private_key: ::PrivateKey, + upgrade_lock: UpgradeLock, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, + storage: Arc>, + leaf: Leaf, + vid_share: Proposal>, + extended_vote: bool, +) -> Result<()> { + ensure!( + quorum_membership.has_stake(&public_key, epoch_number), + info!( + "We were not chosen for quorum committee on {:?}", + view_number + ) + ); + + // Create and send the vote. + let vote = QuorumVote::::create_signed_vote( + QuorumData { + leaf_commit: leaf.commit(&upgrade_lock).await, + }, + view_number, + &public_key, + &private_key, + &upgrade_lock, + ) + .await + .wrap() + .context(error!("Failed to sign vote. This should never happen."))?; + tracing::debug!( + "sending vote to next quorum leader {:?}", + vote.view_number() + 1 + ); + // Add to the storage. 
+ storage + .write() + .await + .append_vid(&vid_share) + .await + .wrap() + .context(error!("Failed to store VID share"))?; + + if extended_vote { + broadcast_event( + Arc::new(HotShotEvent::ExtendedQuorumVoteSend(vote)), + &sender, + ) + .await; + } else { + broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &sender).await; + } + + Ok(()) +} diff --git a/crates/task-impls/src/quorum_vote/mod.rs b/crates/task-impls/src/quorum_vote/mod.rs index 3f78573d50..6ac5fe37dd 100644 --- a/crates/task-impls/src/quorum_vote/mod.rs +++ b/crates/task-impls/src/quorum_vote/mod.rs @@ -6,45 +6,40 @@ use std::{collections::BTreeMap, sync::Arc}; +use crate::{ + events::HotShotEvent, + helpers::broadcast_event, + quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, +}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::{ - dependency::{AndDependency, Dependency, EventDependency}, + dependency::{AndDependency, EventDependency}, dependency_task::{DependencyTask, HandleDepOutput}, task::TaskState, }; use hotshot_types::{ consensus::OuterConsensus, - data::{Leaf, VidDisperseShare, ViewNumber}, + data::{Leaf, QuorumProposal}, event::Event, message::{Proposal, UpgradeLock}, - simple_vote::{QuorumData, QuorumVote}, traits::{ block_contents::BlockHeader, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions}, signature_key::SignatureKey, storage::Storage, - ValidatedState, }, - utils::{View, ViewInner}, + utils::epoch_from_block_number, vid::vid_scheme, vote::{Certificate, HasViewNumber}, }; use jf_vid::VidScheme; -#[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; use utils::anytrace::*; - -use crate::{ - events::HotShotEvent, - helpers::{broadcast_event, cancel_task, fetch_proposal}, - 
quorum_vote::handlers::handle_quorum_proposal_validated, -}; +use vbs::version::StaticVersionType; /// Event handlers for `QuorumProposalValidated`. mod handlers; @@ -76,8 +71,6 @@ pub struct VoteDependencyHandle, V pub storage: Arc>, /// View number to vote on. pub view_number: TYPES::View, - /// Epoch number to vote on. - pub epoch_number: TYPES::Epoch, /// Event sender. pub sender: Sender>>, /// Event receiver. @@ -86,162 +79,8 @@ pub struct VoteDependencyHandle, V pub upgrade_lock: UpgradeLock, /// The node's id pub id: u64, -} - -impl + 'static, V: Versions> - VoteDependencyHandle -{ - /// Updates the shared consensus state with the new voting data. - #[instrument(skip_all, target = "VoteDependencyHandle", fields(id = self.id, view = *self.view_number))] - async fn update_shared_state( - &self, - proposed_leaf: &Leaf, - vid_share: &Proposal>, - ) -> Result<()> { - let justify_qc = &proposed_leaf.justify_qc(); - - // Justify qc's leaf commitment should be the same as the parent's leaf commitment. - let mut maybe_parent = self - .consensus - .read() - .await - .saved_leaves() - .get(&justify_qc.data().leaf_commit) - .cloned(); - maybe_parent = match maybe_parent { - Some(p) => Some(p), - None => fetch_proposal( - justify_qc.view_number(), - self.sender.clone(), - self.receiver.activate_cloned(), - Arc::clone(&self.quorum_membership), - OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), - self.public_key.clone(), - self.private_key.clone(), - &self.upgrade_lock, - ) - .await - .ok(), - }; - let parent = maybe_parent.context(info!( - "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", - justify_qc.data().leaf_commit, - proposed_leaf.view_number(), - ))?; - let consensus_reader = self.consensus.read().await; - - let (Some(parent_state), _) = consensus_reader.state_and_delta(parent.view_number()) else { - bail!("Parent state not found! 
Consensus internally inconsistent"); - }; - - drop(consensus_reader); - - let version = self.upgrade_lock.version(self.view_number).await?; - - let (validated_state, state_delta) = parent_state - .validate_and_apply_header( - &self.instance_state, - &parent, - &proposed_leaf.block_header().clone(), - vid_share.data.common.clone(), - version, - ) - .await - .wrap() - .context(warn!("Block header doesn't extend the proposal!"))?; - - let state = Arc::new(validated_state); - let delta = Arc::new(state_delta); - - // Now that we've rounded everyone up, we need to update the shared state and broadcast our events. - // We will defer broadcast until all states are updated to avoid holding onto the lock during a network call. - let mut consensus_writer = self.consensus.write().await; - - let view = View { - view_inner: ViewInner::Leaf { - leaf: proposed_leaf.commit(&self.upgrade_lock).await, - state: Arc::clone(&state), - delta: Some(Arc::clone(&delta)), - }, - }; - if let Err(e) = - consensus_writer.update_validated_state_map(proposed_leaf.view_number(), view.clone()) - { - tracing::trace!("{e:?}"); - } - consensus_writer - .update_saved_leaves(proposed_leaf.clone(), &self.upgrade_lock) - .await; - - // Kick back our updated structures for downstream usage. - let new_leaves = consensus_writer.saved_leaves().clone(); - let new_state = consensus_writer.validated_state_map().clone(); - drop(consensus_writer); - - // Broadcast now that the lock is dropped. - broadcast_event( - HotShotEvent::ValidatedStateUpdated(proposed_leaf.view_number(), view).into(), - &self.sender, - ) - .await; - - // Send the new state up to the sequencer. - self.storage - .write() - .await - .update_undecided_state(new_leaves, new_state) - .await - .wrap() - .context(error!("Failed to update undecided state"))?; - - Ok(()) - } - - /// Submits the `QuorumVoteSend` event if all the dependencies are met. 
- #[instrument(skip_all, fields(id = self.id, name = "Submit quorum vote", level = "error"))] - async fn submit_vote( - &self, - leaf: Leaf, - vid_share: Proposal>, - ) -> Result<()> { - ensure!( - self.quorum_membership - .has_stake(&self.public_key, self.epoch_number), - info!( - "We were not chosen for quorum committee on {:?}", - self.view_number - ) - ); - - // Create and send the vote. - let vote = QuorumVote::::create_signed_vote( - QuorumData { - leaf_commit: leaf.commit(&self.upgrade_lock).await, - }, - self.view_number, - &self.public_key, - &self.private_key, - &self.upgrade_lock, - ) - .await - .wrap() - .context(error!("Failed to sign vote. This should never happen."))?; - tracing::debug!( - "sending vote to next quorum leader {:?}", - vote.view_number() + 1 - ); - // Add to the storage. - self.storage - .write() - .await - .append_vid(&vid_share) - .await - .wrap() - .context(error!("Failed to store VID share"))?; - broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &self.sender).await; - - Ok(()) - } + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl + 'static, V: Versions> HandleDepOutput @@ -250,43 +89,37 @@ impl + 'static, V: Versions> Handl type Output = Vec>>; #[allow(clippy::too_many_lines)] + #[instrument(skip_all, fields(id = self.id, view = *self.view_number))] async fn handle_dep_result(self, res: Self::Output) { - let high_qc_view_number = self.consensus.read().await.high_qc().view_number; - - // The validated state of a non-genesis high QC should exist in the state map. - if *high_qc_view_number != *ViewNumber::genesis() - && !self - .consensus - .read() - .await - .validated_state_map() - .contains_key(&high_qc_view_number) - { - // Block on receiving the event from the event stream. 
- EventDependency::new( - self.receiver.activate_cloned(), - Box::new(move |event| { - let event = event.as_ref(); - if let HotShotEvent::ValidatedStateUpdated(view_number, _) = event { - *view_number == high_qc_view_number - } else { - false - } - }), - ) - .completed() - .await; - } - let mut payload_commitment = None; let mut leaf = None; let mut vid_share = None; + let mut parent_view_number = None; for event in res { match event.as_ref() { #[allow(unused_assignments)] HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { + let version = match self.upgrade_lock.version(self.view_number).await { + Ok(version) => version, + Err(e) => { + tracing::error!("{e:#}"); + return; + } + }; let proposal_payload_comm = proposal.data.block_header.payload_commitment(); - if let Some(ref comm) = payload_commitment { + let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + + if version >= V::Epochs::VERSION + && self + .consensus + .read() + .await + .is_leaf_forming_eqc(proposal.data.justify_qc.data.leaf_commit) + { + tracing::debug!("Do not vote here. Voting for this case is handled in QuorumVoteTaskState"); + return; + } else if let Some(ref comm) = payload_commitment { if proposal_payload_comm != *comm { tracing::error!("Quorum proposal has inconsistent payload commitment with DAC or VID."); return; @@ -294,19 +127,19 @@ impl + 'static, V: Versions> Handl } else { payload_commitment = Some(proposal_payload_comm); } - let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; - let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + if proposed_leaf.parent_commitment() != parent_commitment { tracing::warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); return; } - // Update our persistent storage of the proposal. If we cannot store the proposal reutrn + // Update our persistent storage of the proposal. 
If we cannot store the proposal return // and error so we don't vote if let Err(e) = self.storage.write().await.append_proposal(proposal).await { tracing::error!("failed to store proposal, not voting. error = {e:#}"); return; } leaf = Some(proposed_leaf); + parent_view_number = Some(parent_leaf.view_number()); } HotShotEvent::DaCertificateValidated(cert) => { let cert_payload_comm = &cert.data().payload_commit; @@ -334,13 +167,6 @@ impl + 'static, V: Versions> Handl _ => {} } } - broadcast_event( - Arc::new(HotShotEvent::QuorumVoteDependenciesValidated( - self.view_number, - )), - &self.sender, - ) - .await; let Some(vid_share) = vid_share else { tracing::error!( @@ -359,12 +185,63 @@ impl + 'static, V: Versions> Handl }; // Update internal state - if let Err(e) = self.update_shared_state(&leaf, &vid_share).await { + if let Err(e) = update_shared_state::( + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), + self.sender.clone(), + self.receiver.clone(), + Arc::clone(&self.quorum_membership), + self.public_key.clone(), + self.private_key.clone(), + self.upgrade_lock.clone(), + self.view_number, + Arc::clone(&self.instance_state), + Arc::clone(&self.storage), + &leaf, + &vid_share, + parent_view_number, + ) + .await + { tracing::error!("Failed to update shared consensus state; error = {e:#}"); return; } - if let Err(e) = self.submit_vote(leaf, vid_share).await { + let current_epoch = + TYPES::Epoch::new(epoch_from_block_number(leaf.height(), self.epoch_height)); + tracing::trace!( + "Sending ViewChange for view {} and epoch {}", + self.view_number + 1, + *current_epoch + ); + broadcast_event( + Arc::new(HotShotEvent::ViewChange( + self.view_number + 1, + current_epoch, + )), + &self.sender, + ) + .await; + + let is_vote_leaf_extended = self + .consensus + .read() + .await + .is_leaf_extended(leaf.commit(&self.upgrade_lock).await); + if let Err(e) = submit_vote::( + self.sender.clone(), + Arc::clone(&self.quorum_membership), + self.public_key.clone(), 
+ self.private_key.clone(), + self.upgrade_lock.clone(), + self.view_number, + current_epoch, + Arc::clone(&self.storage), + leaf, + vid_share, + is_vote_leaf_extended, + ) + .await + { tracing::debug!("Failed to vote; error = {e:#}"); } } @@ -412,6 +289,9 @@ pub struct QuorumVoteTaskState, V: /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl, V: Versions> QuorumVoteTaskState { @@ -465,16 +345,10 @@ impl, V: Versions> QuorumVoteTaskS fn create_dependency_task_if_new( &mut self, view_number: TYPES::View, - epoch_number: TYPES::Epoch, event_receiver: Receiver>>, event_sender: &Sender>>, event: Option>>, ) { - if view_number <= self.latest_voted_view { - tracing::trace!("We have already voted for this view"); - return; - } - if self.vote_dependencies.contains_key(&view_number) { return; } @@ -496,6 +370,7 @@ impl, V: Versions> QuorumVoteTaskS } let deps = vec![quorum_proposal_dependency, dac_dependency, vid_dependency]; + let dependency_chain = AndDependency::from_deps(deps); let dependency_task = DependencyTask::new( @@ -508,11 +383,11 @@ impl, V: Versions> QuorumVoteTaskS quorum_membership: Arc::clone(&self.quorum_membership), storage: Arc::clone(&self.storage), view_number, - epoch_number, sender: event_sender.clone(), receiver: event_receiver.clone().deactivate(), upgrade_lock: self.upgrade_lock.clone(), id: self.id, + epoch_height: self.epoch_height, }, ); self.vote_dependencies @@ -532,7 +407,7 @@ impl, V: Versions> QuorumVoteTaskS // Cancel the old dependency tasks. 
for view in *self.latest_voted_view..(*new_view) { if let Some(dependency) = self.vote_dependencies.remove(&TYPES::View::new(view)) { - cancel_task(dependency).await; + dependency.abort(); tracing::debug!("Vote dependency removed for view {:?}", view); } } @@ -553,29 +428,46 @@ impl, V: Versions> QuorumVoteTaskS event_sender: Sender>>, ) -> Result<()> { match event.as_ref() { - HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { - let cur_epoch = self.consensus.read().await.cur_epoch(); + HotShotEvent::QuorumProposalValidated(proposal, parent_leaf) => { tracing::trace!( "Received Proposal for view {}", *proposal.data.view_number() ); // Handle the event before creating the dependency task. - if let Err(e) = - handle_quorum_proposal_validated(&proposal.data, &event_sender, self).await - { + if let Err(e) = handle_quorum_proposal_validated(&proposal.data, self).await { tracing::debug!( "Failed to handle QuorumProposalValidated event; error = {e:#}" ); } - self.create_dependency_task_if_new( - proposal.data.view_number, - cur_epoch, - event_receiver, - &event_sender, - Some(Arc::clone(&event)), + ensure!( + proposal.data.view_number() > self.latest_voted_view, + "We have already voted for this view" ); + + let version = self + .upgrade_lock + .version(proposal.data.view_number()) + .await?; + + let is_justify_qc_forming_eqc = self + .consensus + .read() + .await + .is_leaf_forming_eqc(proposal.data.justify_qc.data.leaf_commit); + + if version >= V::Epochs::VERSION && is_justify_qc_forming_eqc { + self.handle_eqc_voting(proposal, parent_leaf, event_sender, event_receiver) + .await; + } else { + self.create_dependency_task_if_new( + proposal.data.view_number, + event_receiver, + &event_sender, + Some(Arc::clone(&event)), + ); + } } HotShotEvent::DaCertificateRecv(cert) => { let view = cert.view_number; @@ -606,13 +498,7 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new( - view, - cur_epoch, - 
event_receiver, - &event_sender, - None, - ); + self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); } HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); @@ -670,34 +556,26 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new( - view, - cur_epoch, - event_receiver, - &event_sender, - None, - ); - } - HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { - tracing::debug!("All vote dependencies verified for view {:?}", view_number); - if !self.update_latest_voted_view(*view_number).await { - tracing::debug!("view not updated"); - } + self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); } HotShotEvent::Timeout(view) => { + let view = TYPES::View::new(view.saturating_sub(1)); // cancel old tasks - let current_tasks = self.vote_dependencies.split_off(view); + let current_tasks = self.vote_dependencies.split_off(&view); while let Some((_, task)) = self.vote_dependencies.pop_last() { - cancel_task(task).await; + task.abort(); } self.vote_dependencies = current_tasks; } - HotShotEvent::ViewChange(mut view) => { + HotShotEvent::ViewChange(mut view, _) => { view = TYPES::View::new(view.saturating_sub(1)); + if !self.update_latest_voted_view(view).await { + tracing::debug!("view not updated"); + } // cancel old tasks let current_tasks = self.vote_dependencies.split_off(&view); while let Some((_, task)) = self.vote_dependencies.pop_last() { - cancel_task(task).await; + task.abort(); } self.vote_dependencies = current_tasks; } @@ -705,6 +583,121 @@ impl, V: Versions> QuorumVoteTaskS } Ok(()) } + + /// Handles voting for the last block in the epoch to form the Extended QC. + #[allow(clippy::too_many_lines)] + async fn handle_eqc_voting( + &self, + proposal: &Proposal>, + parent_leaf: &Leaf, + event_sender: Sender>>, + event_receiver: Receiver>>, + ) { + tracing::info!("Reached end of epoch. 
Justify QC is for the last block in the epoch."); + let proposed_leaf = Leaf::from_quorum_proposal(&proposal.data); + let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; + if proposed_leaf.height() != parent_leaf.height() + || proposed_leaf.payload_commitment() != parent_leaf.payload_commitment() + { + tracing::error!("Justify QC is for the last block but it's not extended and a new block is proposed. Not voting!"); + return; + } + + tracing::info!( + "Reached end of epoch. Proposed leaf has the same height and payload as its parent." + ); + + let mut consensus_writer = self.consensus.write().await; + let Some(vid_shares) = consensus_writer + .vid_shares() + .get(&parent_leaf.view_number()) + else { + tracing::warn!( + "Proposed leaf is the same as its parent but we don't have our VID for it" + ); + return; + }; + let Some(vid) = vid_shares.get(&self.public_key) else { + tracing::warn!( + "Proposed leaf is the same as its parent but we don't have our VID for it" + ); + return; + }; + let mut updated_vid = vid.clone(); + updated_vid.data.view_number = proposal.data.view_number; + consensus_writer.update_vid_shares(updated_vid.data.view_number, updated_vid.clone()); + drop(consensus_writer); + + if proposed_leaf.parent_commitment() != parent_commitment { + tracing::warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); + return; + } + // Update our persistent storage of the proposal. If we cannot store the proposal return + // and error so we don't vote + if let Err(e) = self.storage.write().await.append_proposal(proposal).await { + tracing::error!("failed to store proposal, not voting. 
error = {e:#}"); + return; + } + + // Update internal state + if let Err(e) = update_shared_state::( + OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)), + event_sender.clone(), + event_receiver.clone().deactivate(), + Arc::clone(&self.quorum_membership), + self.public_key.clone(), + self.private_key.clone(), + self.upgrade_lock.clone(), + proposal.data.view_number(), + Arc::clone(&self.instance_state), + Arc::clone(&self.storage), + &proposed_leaf, + &updated_vid, + Some(parent_leaf.view_number()), + ) + .await + { + tracing::error!("Failed to update shared consensus state; error = {e:#}"); + return; + } + + let current_block_number = proposed_leaf.height(); + let current_epoch = TYPES::Epoch::new(epoch_from_block_number( + current_block_number, + self.epoch_height, + )); + tracing::trace!( + "Sending ViewChange for view {} and epoch {}", + proposal.data.view_number() + 1, + *current_epoch + ); + broadcast_event( + Arc::new(HotShotEvent::ViewChange( + proposal.data.view_number() + 1, + current_epoch, + )), + &event_sender, + ) + .await; + + if let Err(e) = submit_vote::( + event_sender.clone(), + Arc::clone(&self.quorum_membership), + self.public_key.clone(), + self.private_key.clone(), + self.upgrade_lock.clone(), + proposal.data.view_number(), + current_epoch, + Arc::clone(&self.storage), + proposed_leaf, + updated_vid, + false, + ) + .await + { + tracing::debug!("Failed to vote; error = {e:#}"); + } + } } #[async_trait] @@ -722,11 +715,8 @@ impl, V: Versions> TaskState self.handle(event, receiver.clone(), sender.clone()).await } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { while let Some((_, handle)) = self.vote_dependencies.pop_last() { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] handle.abort(); } } diff --git a/crates/task-impls/src/request.rs b/crates/task-impls/src/request.rs index abacf32ffd..1a565a85de 100644 --- a/crates/task-impls/src/request.rs 
+++ b/crates/task-impls/src/request.rs @@ -14,9 +14,6 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use async_trait::async_trait; use hotshot_task::{ dependency::{Dependency, EventDependency}, @@ -34,8 +31,11 @@ use hotshot_types::{ }; use rand::{seq::SliceRandom, thread_rng}; use sha2::{Digest, Sha256}; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; +use tokio::{ + spawn, + task::JoinHandle, + time::{sleep, timeout}, +}; use tracing::instrument; use utils::anytrace::Result; @@ -75,7 +75,7 @@ pub struct NetworkRequestState> { impl> Drop for NetworkRequestState { fn drop(&mut self) { - futures::executor::block_on(async move { self.cancel_subtasks().await }); + self.cancel_subtasks(); } } @@ -112,7 +112,7 @@ impl> TaskState for NetworkRequest } Ok(()) } - HotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view, _) => { let view = *view; if view > self.view { self.view = view; @@ -123,7 +123,7 @@ impl> TaskState for NetworkRequest } } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { self.shutdown_flag.store(true, Ordering::Relaxed); while !self.spawned_tasks.is_empty() { @@ -132,9 +132,6 @@ impl> TaskState for NetworkRequest }; for handle in handles { - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] handle.abort(); } } @@ -204,10 +201,10 @@ impl> NetworkRequestState = spawn(async move { // Do the delay only if primary is up and then start sending if !network.is_primary_down() { - async_sleep(delay).await; + sleep(delay).await; } let mut recipients_it = recipients.iter(); @@ -280,7 +277,7 @@ impl> NetworkRequestState NetworkResponseState { .is_none() { // Sleep in hope we receive txns in the meantime - async_sleep(TXNS_TIMEOUT).await; + sleep(TXNS_TIMEOUT).await; 
Consensus::calculate_and_update_vid( OuterConsensus::new(Arc::clone(&self.consensus)), view, @@ -209,5 +205,5 @@ pub fn run_response_task( event_stream: Receiver>>, sender: Sender>>, ) -> JoinHandle<()> { - async_spawn(task_state.run_response_loop(event_stream, sender)) + spawn(task_state.run_response_loop(event_stream, sender)) } diff --git a/crates/task-impls/src/rewind.rs b/crates/task-impls/src/rewind.rs index 9ae424b62b..4f62359aeb 100644 --- a/crates/task-impls/src/rewind.rs +++ b/crates/task-impls/src/rewind.rs @@ -45,7 +45,7 @@ impl TaskState for RewindTaskState { Ok(()) } - async fn cancel_subtasks(&mut self) { + fn cancel_subtasks(&mut self) { tracing::info!("Node ID {} Recording {} events", self.id, self.events.len()); let filename = format!("rewind_{}.log", self.id); let mut file = match OpenOptions::new() diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 69ea07f1a5..43cdef8bfc 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -10,7 +10,6 @@ use std::{ }; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_timeout}; use async_trait::async_trait; use futures::{future::join_all, stream::FuturesUnordered, StreamExt}; use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo; @@ -31,6 +30,7 @@ use hotshot_types::{ utils::ViewInner, vid::{VidCommitment, VidPrecomputeData}, }; +use tokio::time::{sleep, timeout}; use tracing::instrument; use url::Url; use utils::anytrace::*; @@ -217,6 +217,7 @@ impl, V: Versions> TransactionTask let Some(null_fee) = null_block::builder_fee::( self.membership.total_nodes(self.cur_epoch), version, + *block_view, ) else { tracing::error!("Failed to get null fee"); return None; @@ -274,7 +275,7 @@ impl, V: Versions> TransactionTask let start = Instant::now(); - let maybe_auction_result = async_timeout( + let maybe_auction_result = timeout( self.builder_timeout, 
self.auction_results_provider .fetch_auction_result(block_view), @@ -293,7 +294,7 @@ impl, V: Versions> TransactionTask builder_urls.push(self.fallback_builder_url.clone()); for url in builder_urls { - futures.push(async_timeout( + futures.push(timeout( self.builder_timeout.saturating_sub(start.elapsed()), async { let client = BuilderClientMarketplace::new(url); @@ -361,6 +362,7 @@ impl, V: Versions> TransactionTask let Some(null_fee) = null_block::builder_fee::( self.membership.total_nodes(self.cur_epoch), version, + *block_view, ) else { tracing::error!("Failed to calculate null block fee."); return None; @@ -433,8 +435,24 @@ impl, V: Versions> TransactionTask None } + /// epochs view change handler + #[instrument(skip_all, fields(id = self.id, view_number = *self.cur_view))] + pub async fn handle_view_change_epochs( + &mut self, + event_stream: &Sender>>, + block_view: TYPES::View, + ) -> Option { + if self.consensus.read().await.is_high_qc_forming_eqc() { + tracing::info!("Reached end of epoch. 
Not getting a new block until we form an eQC."); + None + } else { + self.handle_view_change_marketplace(event_stream, block_view) + .await + } + } + /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction task", level = "error", target = "TransactionTaskState")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "Transaction task", level = "error", target = "TransactionTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -453,42 +471,21 @@ impl, V: Versions> TransactionTask ) .await; } - HotShotEvent::ViewChange(view) => { - let view = *view; - - tracing::debug!("view change in transactions to view {:?}", view); - ensure!( - *view > *self.cur_view || *self.cur_view == 0, - debug!( - "Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view - ) - ); - - let mut make_block = false; - if *view - *self.cur_view > 1 { - tracing::info!("View changed by more than 1 going to view {:?}", view); - make_block = self.membership.leader(view, self.cur_epoch)? == self.public_key; + HotShotEvent::ViewChange(view, epoch) => { + if *epoch > self.cur_epoch { + self.cur_epoch = *epoch; } - self.cur_view = view; - - let next_view = self.cur_view + 1; - let next_leader = - self.membership.leader(next_view, self.cur_epoch)? 
== self.public_key; - + let view = TYPES::View::new(std::cmp::max(1, **view)); ensure!( - make_block || next_leader, + *view > *self.cur_view, debug!( - "Not making the block because we are not leader for view {:?}", - self.cur_view + "Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view ) ); - - if make_block { - self.handle_view_change(&event_stream, self.cur_view).await; - } - - if next_leader { - self.handle_view_change(&event_stream, next_view).await; + self.cur_view = view; + if self.membership.leader(view, self.cur_epoch)? == self.public_key { + self.handle_view_change(&event_stream, view).await; + return Ok(()); } } _ => {} @@ -510,7 +507,7 @@ impl, V: Versions> TransactionTask Err(e) if task_start_time.elapsed() >= self.builder_timeout => break Err(e), _ => { // We still have time, will re-try in a bit - async_sleep(RETRY_DELAY).await; + sleep(RETRY_DELAY).await; continue; } } @@ -585,7 +582,7 @@ impl, V: Versions> TransactionTask }; while task_start_time.elapsed() < self.builder_timeout { - match async_timeout( + match timeout( self.builder_timeout .saturating_sub(task_start_time.elapsed()), self.block_from_builder(parent_comm, parent_view, &parent_comm_sig), @@ -601,7 +598,7 @@ impl, V: Versions> TransactionTask Ok(Err(err)) => { tracing::info!("Couldn't get a block: {err:#}"); // pause a bit - async_sleep(RETRY_DELAY).await; + sleep(RETRY_DELAY).await; continue; } @@ -656,7 +653,7 @@ impl, V: Versions> TransactionTask break; } } - let timeout = async_sleep(std::cmp::max( + let timeout = sleep(std::cmp::max( query_start .elapsed() .mul_f32(BUILDER_ADDITIONAL_TIME_MULTIPLIER), @@ -714,6 +711,13 @@ impl, V: Versions> TransactionTask bail!("No available blocks"); } + let version = match self.upgrade_lock.version(view_number).await { + Ok(v) => v, + Err(err) => { + bail!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + } + }; + for (block_info, 
builder_idx) in available_blocks { // Verify signature over chosen block. if !block_info.sender.validate_block_info_signature( @@ -732,7 +736,7 @@ impl, V: Versions> TransactionTask ) { Ok(request_signature) => request_signature, Err(err) => { - tracing::warn!(%err, "Failed to sign block hash"); + tracing::error!(%err, "Failed to sign block hash"); continue; } }; @@ -740,9 +744,18 @@ impl, V: Versions> TransactionTask let response = { let client = &self.builder_clients[builder_idx]; - let (block, header_input) = futures::join! { - client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), - client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) + // If epochs are supported, provide the latest `num_nodes` information to the + // builder for VID computation. + let (block, header_input) = if version >= V::Epochs::VERSION { + futures::join! { + client.claim_block_with_num_nodes(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature, self.membership.total_nodes(self.cur_epoch)) , + client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) + } + } else { + futures::join! 
{ + client.claim_block(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature), + client.claim_block_header_input(block_info.block_hash.clone(), view_number.u64(), self.public_key.clone(), &request_signature) + } }; let block_data = match block { @@ -814,5 +827,5 @@ impl, V: Versions> TaskState self.handle(event, sender.clone()).await } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } diff --git a/crates/task-impls/src/upgrade.rs b/crates/task-impls/src/upgrade.rs index fb4f4de7f4..c5e9735701 100644 --- a/crates/task-impls/src/upgrade.rs +++ b/crates/task-impls/src/upgrade.rs @@ -37,7 +37,7 @@ use crate::{ vote_collection::{handle_vote, VoteCollectorsMap}, }; -/// Tracks state of a DA task +/// Tracks state of an upgrade task pub struct UpgradeTaskState, V: Versions> { /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -50,6 +50,7 @@ pub struct UpgradeTaskState, V: Ve /// Membership for Quorum Certs/votes pub quorum_membership: Arc, + /// The underlying network pub network: Arc, @@ -104,7 +105,7 @@ impl, V: Versions> UpgradeTaskStat } /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Upgrade Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "Upgrade Task", level = "error")] pub async fn handle( &mut self, event: Arc>, @@ -245,10 +246,14 @@ impl, V: Versions> UpgradeTaskStat &event, &tx, &self.upgrade_lock, + true, ) .await?; } - HotShotEvent::ViewChange(new_view) => { + HotShotEvent::ViewChange(new_view, epoch_number) => { + if *epoch_number > self.cur_epoch { + self.cur_epoch = *epoch_number; + } ensure!(self.cur_view < *new_view || *self.cur_view == 0); self.cur_view = *new_view; @@ -335,5 +340,5 @@ impl, V: Versions> TaskState Ok(()) } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } diff --git 
a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index 68afabf6d0..3795577bbb 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -59,7 +59,7 @@ pub struct VidTaskState> { impl> VidTaskState { /// main task event handler - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error", target = "VidTaskState")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "VID Main Task", level = "error", target = "VidTaskState")] pub async fn handle( &mut self, event: Arc>, @@ -111,15 +111,6 @@ impl> VidTaskState { ) .await; - // send the block to the VID dispersal function - broadcast_event( - Arc::new(HotShotEvent::BlockReady(vid_disperse, *view_number)), - &event_stream, - ) - .await; - } - - HotShotEvent::BlockReady(vid_disperse, view_number) => { let view_number = *view_number; let Ok(signature) = TYPES::SignatureKey::sign( &self.private_key, @@ -143,7 +134,7 @@ impl> VidTaskState { .await; } - HotShotEvent::ViewChange(view) => { + HotShotEvent::ViewChange(view, epoch) => { let view = *view; if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { return None; @@ -153,6 +144,9 @@ impl> VidTaskState { info!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; + if *epoch > self.cur_epoch { + self.cur_epoch = *epoch; + } return None; } @@ -181,5 +175,5 @@ impl> TaskState for VidTaskState, V: Versions> TaskState self.handle(event, sender.clone()).await } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } /// State of a view sync replica task @@ -201,7 +197,7 @@ impl, V: Versions> TaskState Ok(()) } - async fn cancel_subtasks(&mut self) {} + fn cancel_subtasks(&mut self) {} } impl, V: Versions> ViewSyncTaskState { @@ -269,7 +265,7 @@ impl, V: Versions> ViewSyncTaskSta task_map.insert(view, replica_state); } - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), 
name = "View Sync Main Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task pub async fn handle( @@ -336,9 +332,14 @@ impl, V: Versions> ViewSyncTaskSta epoch: self.cur_epoch, id: self.id, }; - let vote_collector = - create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await?; + let vote_collector = create_vote_accumulator( + &info, + event, + &event_stream, + self.upgrade_lock.clone(), + true, + ) + .await?; relay_map.insert(relay, vote_collector); } @@ -377,9 +378,14 @@ impl, V: Versions> ViewSyncTaskSta id: self.id, }; - let vote_collector = - create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await?; + let vote_collector = create_vote_accumulator( + &info, + event, + &event_stream, + self.upgrade_lock.clone(), + true, + ) + .await?; relay_map.insert(relay, vote_collector); } @@ -416,15 +422,23 @@ impl, V: Versions> ViewSyncTaskSta epoch: self.cur_epoch, id: self.id, }; - let vote_collector = - create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await; + let vote_collector = create_vote_accumulator( + &info, + event, + &event_stream, + self.upgrade_lock.clone(), + true, + ) + .await; if let Ok(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } } - &HotShotEvent::ViewChange(new_view) => { + &HotShotEvent::ViewChange(new_view, epoch) => { + if epoch > self.cur_epoch { + self.cur_epoch = epoch; + } let new_view = TYPES::View::new(*new_view); if self.cur_view < new_view { tracing::debug!( @@ -439,7 +453,6 @@ impl, V: Versions> ViewSyncTaskSta // Garbage collect old tasks // We could put this into a separate async task, but that would require making several fields on ViewSyncTaskState thread-safe and harm readability. 
In the common case this will have zero tasks to clean up. - // cancel poll for votes // run GC for i in *self.last_garbage_collected_view..*self.cur_view { self.replica_task_map @@ -466,7 +479,7 @@ impl, V: Versions> ViewSyncTaskSta &HotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it ensure!( - view_number > self.cur_view, + view_number >= self.cur_view, debug!("Discarding old timeout vote.") ); @@ -495,9 +508,9 @@ impl, V: Versions> ViewSyncTaskSta .await; } else { // If this is the first timeout we've seen advance to the next view - self.cur_view = view_number; + self.cur_view = view_number + 1; broadcast_event( - Arc::new(HotShotEvent::ViewChange(TYPES::View::new(*self.cur_view))), + Arc::new(HotShotEvent::ViewChange(self.cur_view, self.cur_epoch)), &event_stream, ) .await; @@ -513,7 +526,7 @@ impl, V: Versions> ViewSyncTaskSta impl, V: Versions> ViewSyncReplicaTaskState { - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "View Sync Replica Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = *self.cur_epoch), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle( &mut self, @@ -577,17 +590,17 @@ impl, V: Versions> } if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; + timeout_task.abort(); } - self.timeout_task = Some(async_spawn({ + self.timeout_task = Some(spawn({ let stream = event_stream.clone(); let phase = last_seen_certificate; let relay = self.relay; let next_view = self.next_view; let timeout = self.view_sync_timeout; async move { - async_sleep(timeout).await; + sleep(timeout).await; tracing::warn!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); broadcast_event( @@ -663,23 +676,24 @@ impl, V: Versions> *self.next_view ); + // TODO: Figure out the correct way to view sync across epochs if needed 
broadcast_event( - Arc::new(HotShotEvent::ViewChange(self.next_view)), + Arc::new(HotShotEvent::ViewChange(self.next_view, self.cur_epoch)), &event_stream, ) .await; if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; + timeout_task.abort(); } - self.timeout_task = Some(async_spawn({ + self.timeout_task = Some(spawn({ let stream = event_stream.clone(); let phase = last_seen_certificate; let relay = self.relay; let next_view = self.next_view; let timeout = self.view_sync_timeout; async move { - async_sleep(timeout).await; + sleep(timeout).await; tracing::warn!( "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", relay @@ -726,11 +740,12 @@ impl, V: Versions> } if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; + timeout_task.abort(); } + // TODO: Figure out the correct way to view sync across epochs if needed broadcast_event( - Arc::new(HotShotEvent::ViewChange(self.next_view)), + Arc::new(HotShotEvent::ViewChange(self.next_view, self.cur_epoch)), &event_stream, ) .await; @@ -769,13 +784,13 @@ impl, V: Versions> .await; } - self.timeout_task = Some(async_spawn({ + self.timeout_task = Some(spawn({ let stream = event_stream.clone(); let relay = self.relay; let next_view = self.next_view; let timeout = self.view_sync_timeout; async move { - async_sleep(timeout).await; + sleep(timeout).await; tracing::warn!("Vote sending timed out in ViewSyncTrigger"); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( @@ -797,7 +812,7 @@ impl, V: Versions> // Shouldn't ever receive a timeout for a relay higher than ours if TYPES::View::new(*round) == self.next_view && *relay == self.relay { if let Some(timeout_task) = self.timeout_task.take() { - cancel_task(timeout_task).await; + timeout_task.abort(); } self.relay += 1; match last_seen_certificate { @@ -834,14 +849,14 @@ impl, V: Versions> } } - self.timeout_task = Some(async_spawn({ + self.timeout_task = Some(spawn({ let stream = 
event_stream.clone(); let relay = self.relay; let next_view = self.next_view; let timeout = self.view_sync_timeout; let last_cert = last_seen_certificate.clone(); async move { - async_sleep(timeout).await; + sleep(timeout).await; tracing::warn!( "Vote sending timed out in ViewSyncTimeout relay = {}", relay diff --git a/crates/task-impls/src/vote_collection.rs b/crates/task-impls/src/vote_collection.rs index 4c685ca978..c9266ae808 100644 --- a/crates/task-impls/src/vote_collection.rs +++ b/crates/task-impls/src/vote_collection.rs @@ -62,6 +62,9 @@ pub struct VoteCollectionTaskState< /// Node id pub id: u64, + + /// Whether we should check if we are the leader when handling a vote + pub check_if_leader: bool, } /// Describes the functions a vote must implement for it to be aggregatable by the generic vote collection task @@ -103,10 +106,12 @@ impl< vote: &VOTE, event_stream: &Sender>>, ) -> Result> { - ensure!( - vote.leader(&self.membership, self.epoch)? == self.public_key, - info!("Received vote for a view in which we were not the leader.") - ); + if self.check_if_leader { + ensure!( + vote.leader(&self.membership, self.epoch)? 
== self.public_key, + info!("Received vote for a view in which we were not the leader.") + ); + } ensure!( vote.view_number() == self.view, error!( @@ -189,6 +194,7 @@ pub async fn create_vote_accumulator( event: Arc>, sender: &Sender>>, upgrade_lock: UpgradeLock, + check_if_leader: bool, ) -> Result> where TYPES: NodeType, @@ -219,6 +225,7 @@ where view: info.view, epoch: info.epoch, id: info.id, + check_if_leader, }; state.handle_vote_event(Arc::clone(&event), sender).await?; @@ -246,6 +253,7 @@ pub async fn handle_vote< event: &Arc>, event_stream: &Sender>>, upgrade_lock: &UpgradeLock, + check_if_leader: bool, ) -> Result<()> where VoteCollectionTaskState: HandleVoteEvent, @@ -265,6 +273,7 @@ where Arc::clone(event), event_stream, upgrade_lock.clone(), + check_if_leader, ) .await?; diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index 47261bfbd5..89461a58cb 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -11,20 +11,15 @@ edition = { workspace = true } futures = { workspace = true } async-broadcast = { workspace = true } tracing = { workspace = true } -async-compatibility-layer = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } utils = { path = "../utils" } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true, features = [ "time", "rt-multi-thread", "macros", "sync", ] } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true, features = ["attributes"] } [lints] workspace = true diff --git a/crates/task/src/dependency.rs b/crates/task/src/dependency.rs index c4eee030a8..7b3d7dfa0b 100644 --- a/crates/task/src/dependency.rs +++ b/crates/task/src/dependency.rs @@ -189,8 +189,7 @@ mod tests { } } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn it_works() { let 
(tx, rx) = broadcast(10); @@ -205,8 +204,8 @@ mod tests { let result = and.completed().await; assert_eq!(result, Some(vec![5; 5])); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + + #[tokio::test(flavor = "multi_thread")] async fn or_dep() { let (tx, rx) = broadcast(10); @@ -220,8 +219,7 @@ mod tests { assert_eq!(result, Some(5)); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn and_or_dep() { let (tx, rx) = broadcast(10); @@ -238,8 +236,7 @@ mod tests { assert_eq!(result, Some(vec![6, 5])); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn or_and_dep() { let (tx, rx) = broadcast(10); @@ -256,8 +253,7 @@ mod tests { assert_eq!(result, Some(vec![4, 5])); } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn many_and_dep() { let (tx, rx) = broadcast(10); diff --git a/crates/task/src/dependency_task.rs b/crates/task/src/dependency_task.rs index 2ebe4fc032..fb196151eb 100644 --- a/crates/task/src/dependency_task.rs +++ b/crates/task/src/dependency_task.rs @@ -4,10 +4,7 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
-#[cfg(async_executor_impl = "async-std")] -use async_std::task::{spawn, JoinHandle}; use futures::Future; -#[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; use crate::dependency::Dependency; @@ -57,10 +54,7 @@ mod test { use std::time::Duration; use async_broadcast::{broadcast, Receiver, Sender}; - #[cfg(async_executor_impl = "async-std")] - use async_std::task::sleep; use futures::{stream::FuturesOrdered, StreamExt}; - #[cfg(async_executor_impl = "tokio")] use tokio::time::sleep; use super::*; @@ -89,8 +83,7 @@ mod test { EventDependency::new(rx, Box::new(move |v| *v == val)) } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] // allow unused for tokio because it's a test #[allow(unused_must_use)] async fn it_works() { @@ -105,8 +98,7 @@ mod test { join_handle.await; } - #[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[tokio::test(flavor = "multi_thread")] async fn many_works() { let (tx, rx) = broadcast(20); let (res_tx, mut res_rx) = broadcast(20); diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs index 6d0e0ca461..2b4784d00c 100644 --- a/crates/task/src/task.rs +++ b/crates/task/src/task.rs @@ -7,14 +7,8 @@ use std::sync::Arc; use async_broadcast::{Receiver, RecvError, Sender}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::{spawn, JoinHandle}; use async_trait::async_trait; -#[cfg(async_executor_impl = "async-std")] -use futures::future::join_all; -#[cfg(async_executor_impl = "tokio")] use futures::future::try_join_all; -#[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; use utils::anytrace::Result; @@ -34,7 +28,7 @@ pub trait TaskState: Send { type Event: TaskEvent + Clone + Send + Sync; /// Joins all subtasks. 
- async fn cancel_subtasks(&mut self); + fn cancel_subtasks(&mut self); /// Handles an event, providing direct access to the specific channel we received the event on. async fn handle_event( @@ -83,7 +77,7 @@ impl Task { match self.receiver.recv_direct().await { Ok(input) => { if *input == S::Event::shutdown_event() { - self.state.cancel_subtasks().await; + self.state.cancel_subtasks(); break self.boxed_state(); } @@ -133,12 +127,9 @@ impl ConsensusTaskRegistry { let handles = &mut self.task_handles; while let Some(handle) = handles.pop() { - #[cfg(async_executor_impl = "async-std")] - let mut task_state = handle.await; - #[cfg(async_executor_impl = "tokio")] let mut task_state = handle.await.unwrap(); - task_state.cancel_subtasks().await; + task_state.cancel_subtasks(); } } /// Take a task, run it, and register it @@ -153,12 +144,7 @@ impl ConsensusTaskRegistry { /// # Panics /// Panics if one of the tasks panicked pub async fn join_all(self) -> Vec>> { - #[cfg(async_executor_impl = "async-std")] - let states = join_all(self.task_handles).await; - #[cfg(async_executor_impl = "tokio")] - let states = try_join_all(self.task_handles).await.unwrap(); - - states + try_join_all(self.task_handles).await.unwrap() } } @@ -187,9 +173,6 @@ impl NetworkTaskRegistry { /// tasks being joined return an error. 
pub async fn shutdown(&mut self) { let handles = std::mem::take(&mut self.handles); - #[cfg(async_executor_impl = "async-std")] - join_all(handles).await; - #[cfg(async_executor_impl = "tokio")] try_join_all(handles) .await .expect("Failed to join all tasks during shutdown"); diff --git a/crates/testing/Cargo.toml b/crates/testing/Cargo.toml index 50b0231afe..8f8e59500f 100644 --- a/crates/testing/Cargo.toml +++ b/crates/testing/Cargo.toml @@ -18,7 +18,6 @@ broken_3_chain_fixed = [] automod = "1.0.14" anyhow = { workspace = true } async-broadcast = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bitvec = { workspace = true } @@ -52,16 +51,5 @@ vec1 = { workspace = true } reqwest = { workspace = true } url = { workspace = true } itertools = "0.13.0" - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = [ - 'cfg(async_executor_impl, values("async-std"))', - 'cfg(async_executor_impl, values("tokio"))', - 'cfg(hotshot_example)', -] } diff --git a/crates/testing/src/block_builder/mod.rs b/crates/testing/src/block_builder/mod.rs index b047c7710e..a488b96dab 100644 --- a/crates/testing/src/block_builder/mod.rs +++ b/crates/testing/src/block_builder/mod.rs @@ -4,10 +4,10 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
-use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use async_broadcast::Receiver; -use async_compatibility_layer::art::async_spawn; +use async_lock::RwLock; use async_trait::async_trait; use futures::Stream; use hotshot::{traits::BlockPayload, types::Event}; @@ -28,6 +28,7 @@ use hotshot_types::{ }, }; use tide_disco::{method::ReadState, App, Url}; +use tokio::spawn; use vbs::version::StaticVersionType; use crate::test_builder::BuilderChange; @@ -85,7 +86,7 @@ pub fn run_builder_source( + v0_1::data_source::BuilderDataSource + v0_3::data_source::BuilderDataSource, { - async_spawn(async move { + spawn(async move { let start_builder = |url: Url, source: Source| -> _ { let builder_api_0_1 = hotshot_builder_api::v0_1::builder::define_api::( &Options::default(), @@ -100,7 +101,7 @@ pub fn run_builder_source( .expect("Failed to register the builder API 0.1") .register_module(MARKETPLACE_BUILDER_MODULE, builder_api_0_3) .expect("Failed to register the builder API 0.3"); - async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) + spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) }; let mut handle = Some(start_builder(url.clone(), source.clone())); @@ -112,10 +113,7 @@ pub fn run_builder_source( } BuilderChange::Down => { if let Some(handle) = handle.take() { - #[cfg(async_executor_impl = "tokio")] handle.abort(); - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; } } _ => {} @@ -138,7 +136,7 @@ pub fn run_builder_source_0_1( Source: Clone + Send + Sync + tide_disco::method::ReadState + 'static, ::State: Sync + Send + v0_1::data_source::BuilderDataSource, { - async_spawn(async move { + spawn(async move { let start_builder = |url: Url, source: Source| -> _ { let builder_api = hotshot_builder_api::v0_1::builder::define_api::( &Options::default(), @@ -147,7 +145,7 @@ pub fn run_builder_source_0_1( let mut app: App = App::with_state(source); app.register_module(LEGACY_BUILDER_MODULE, 
builder_api) .expect("Failed to register the builder API"); - async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) + spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())) }; let mut handle = Some(start_builder(url.clone(), source.clone())); @@ -159,10 +157,7 @@ pub fn run_builder_source_0_1( } BuilderChange::Down => { if let Some(handle) = handle.take() { - #[cfg(async_executor_impl = "tokio")] handle.abort(); - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; } } _ => {} @@ -174,7 +169,7 @@ pub fn run_builder_source_0_1( /// Helper function to construct all builder data structures from a list of transactions async fn build_block( transactions: Vec, - num_storage_nodes: usize, + num_storage_nodes: Arc>, pub_key: TYPES::BuilderSignatureKey, priv_key: ::BuilderPrivateKey, ) -> BlockEntry @@ -192,7 +187,7 @@ where let commitment = block_payload.builder_commitment(&metadata); let (vid_commitment, precompute_data) = - precompute_vid_commitment(&block_payload.encode(), num_storage_nodes); + precompute_vid_commitment(&block_payload.encode(), *num_storage_nodes.read_arc().await); // Get block size from the encoded payload let block_size = block_payload.encode().len() as u64; diff --git a/crates/testing/src/block_builder/random.rs b/crates/testing/src/block_builder/random.rs index cea9e40328..5f14578a5e 100644 --- a/crates/testing/src/block_builder/random.rs +++ b/crates/testing/src/block_builder/random.rs @@ -16,7 +16,6 @@ use std::{ }; use async_broadcast::{broadcast, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_trait::async_trait; use futures::{future::BoxFuture, Stream, StreamExt}; @@ -36,6 +35,7 @@ use hotshot_types::{ use lru::LruCache; use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; use tide_disco::{method::ReadState, Url}; +use tokio::{spawn, time::sleep}; use super::{ build_block, run_builder_source_0_1, BlockEntry, BuilderTask, 
TestBuilderImplementation, @@ -46,7 +46,7 @@ pub struct RandomBuilderImplementation; impl RandomBuilderImplementation { pub async fn create>( - num_storage_nodes: usize, + num_nodes: usize, config: RandomBuilderConfig, changes: HashMap, change_sender: Sender, @@ -57,15 +57,17 @@ impl RandomBuilderImplementation { let (pub_key, priv_key) = TYPES::BuilderSignatureKey::generated_from_seed_indexed([1; 32], 0); let blocks = Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))); + let num_nodes = Arc::new(RwLock::new(num_nodes)); let source = RandomBuilderSource { blocks: Arc::clone(&blocks), pub_key: pub_key.clone(), + num_nodes: num_nodes.clone(), should_fail_claims: Arc::new(AtomicBool::new(false)), }; let task = RandomBuilderTask { blocks, config, - num_storage_nodes, + num_nodes: num_nodes.clone(), changes, change_sender, pub_key, @@ -84,21 +86,21 @@ where type Config = RandomBuilderConfig; async fn start( - num_storage_nodes: usize, + num_nodes: usize, url: Url, config: RandomBuilderConfig, changes: HashMap, ) -> Box> { let (change_sender, change_receiver) = broadcast(128); - let (task, source) = Self::create(num_storage_nodes, config, changes, change_sender).await; + let (task, source) = Self::create(num_nodes, config, changes, change_sender).await; run_builder_source_0_1(url, change_receiver, source); Box::new(task) } } pub struct RandomBuilderTask> { - num_storage_nodes: usize, + num_nodes: Arc>, config: RandomBuilderConfig, changes: HashMap, change_sender: Sender, @@ -110,7 +112,7 @@ pub struct RandomBuilderTask> { impl> RandomBuilderTask { async fn build_blocks( options: RandomBuilderConfig, - num_storage_nodes: usize, + num_nodes: Arc>, pub_key: ::BuilderSignatureKey, priv_key: <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, blocks: Arc>>>, @@ -136,7 +138,7 @@ impl> RandomBuilderTask { let block = build_block( transactions, - num_storage_nodes, + num_nodes.clone(), pub_key.clone(), priv_key.clone(), ) @@ -156,7 +158,7 @@ 
impl> RandomBuilderTask { time_per_block.as_millis(), ); } - async_sleep(time_per_block.saturating_sub(start.elapsed())).await; + sleep(time_per_block.saturating_sub(start.elapsed())).await; } } } @@ -169,15 +171,15 @@ where mut self: Box, mut stream: Box> + std::marker::Unpin + Send + 'static>, ) { - let mut task = Some(async_spawn(Self::build_blocks( + let mut task = Some(spawn(Self::build_blocks( self.config.clone(), - self.num_storage_nodes, + self.num_nodes.clone(), self.pub_key.clone(), self.priv_key.clone(), self.blocks.clone(), ))); - async_spawn(async move { + spawn(async move { loop { match stream.next().await { None => { @@ -189,9 +191,9 @@ where match change { BuilderChange::Up => { if task.is_none() { - task = Some(async_spawn(Self::build_blocks( + task = Some(spawn(Self::build_blocks( self.config.clone(), - self.num_storage_nodes, + self.num_nodes.clone(), self.pub_key.clone(), self.priv_key.clone(), self.blocks.clone(), @@ -200,10 +202,7 @@ where } BuilderChange::Down => { if let Some(handle) = task.take() { - #[cfg(async_executor_impl = "tokio")] handle.abort(); - #[cfg(async_executor_impl = "async-std")] - handle.cancel().await; } } BuilderChange::FailClaims(_) => {} @@ -232,6 +231,7 @@ pub struct RandomBuilderSource { >, >, pub_key: TYPES::BuilderSignatureKey, + num_nodes: Arc>, should_fail_claims: Arc, } @@ -243,10 +243,11 @@ where /// Create new [`RandomBuilderSource`] #[must_use] #[allow(clippy::missing_panics_doc)] // ony panics if 256 == 0 - pub fn new(pub_key: TYPES::BuilderSignatureKey) -> Self { + pub fn new(pub_key: TYPES::BuilderSignatureKey, num_nodes: Arc>) -> Self { Self { blocks: Arc::new(RwLock::new(LruCache::new(NonZeroUsize::new(256).unwrap()))), pub_key, + num_nodes, should_fail_claims: Arc::new(AtomicBool::new(false)), } } @@ -304,6 +305,19 @@ impl BuilderDataSource for RandomBuilderSource { Ok(payload) } + async fn claim_block_with_num_nodes( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: 
TYPES::SignatureKey, + signature: &::PureAssembledSignatureType, + num_nodes: usize, + ) -> Result, BuildError> { + *self.num_nodes.write().await = num_nodes; + self.claim_block(block_hash, view_number, sender, signature) + .await + } + async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, diff --git a/crates/testing/src/block_builder/simple.rs b/crates/testing/src/block_builder/simple.rs index e1207a4fc2..e490d8c598 100644 --- a/crates/testing/src/block_builder/simple.rs +++ b/crates/testing/src/block_builder/simple.rs @@ -15,7 +15,6 @@ use std::{ }; use async_broadcast::{broadcast, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; use async_trait::async_trait; use committable::{Commitment, Committable}; @@ -25,8 +24,8 @@ use hotshot::{ types::{Event, EventType, SignatureKey}, }; use hotshot_builder_api::{ - v0_1, v0_1::{ + self, block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::{BuildError, Error, Options}, }, @@ -45,6 +44,7 @@ use hotshot_types::{ }; use lru::LruCache; use tide_disco::{method::ReadState, App, Url}; +use tokio::spawn; use vbs::version::StaticVersionType; use super::{build_block, run_builder_source, BlockEntry, BuilderTask, TestBuilderImplementation}; @@ -54,7 +54,7 @@ pub struct SimpleBuilderImplementation; impl SimpleBuilderImplementation { pub async fn create( - num_storage_nodes: usize, + num_nodes: usize, changes: HashMap, change_sender: Sender, ) -> (SimpleBuilderSource, SimpleBuilderTask) { @@ -70,7 +70,7 @@ impl SimpleBuilderImplementation { priv_key, transactions: transactions.clone(), blocks: blocks.clone(), - num_storage_nodes, + num_nodes: Arc::new(RwLock::new(num_nodes)), should_fail_claims: Arc::clone(&should_fail_claims), }; @@ -95,13 +95,13 @@ where type Config = (); async fn start( - num_storage_nodes: usize, + num_nodes: usize, url: Url, _config: Self::Config, changes: HashMap, ) -> Box> { let (change_sender, change_receiver) = 
broadcast(128); - let (source, task) = Self::create(num_storage_nodes, changes, change_sender).await; + let (source, task) = Self::create(num_nodes, changes, change_sender).await; run_builder_source(url, change_receiver, source); Box::new(task) @@ -112,7 +112,7 @@ where pub struct SimpleBuilderSource { pub_key: TYPES::BuilderSignatureKey, priv_key: ::BuilderPrivateKey, - num_storage_nodes: usize, + num_nodes: Arc>, #[allow(clippy::type_complexity)] transactions: Arc, SubmittedTransaction>>>, blocks: Arc>>>, @@ -248,7 +248,7 @@ where let block_entry = build_block( transactions, - self.num_storage_nodes, + self.num_nodes.clone(), self.pub_key.clone(), self.priv_key.clone(), ) @@ -297,6 +297,19 @@ where Ok(payload) } + async fn claim_block_with_num_nodes( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: TYPES::SignatureKey, + signature: &::PureAssembledSignatureType, + num_nodes: usize, + ) -> Result, BuildError> { + *self.num_nodes.write().await = num_nodes; + self.claim_block(block_hash, view_number, sender, signature) + .await + } + async fn claim_block_header_input( &self, block_hash: &BuilderCommitment, @@ -341,7 +354,7 @@ impl SimpleBuilderSource { .register_module::(MARKETPLACE_BUILDER_MODULE, builder_api_0_3) .expect("Failed to register builder API 0.3"); - async_spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())); + spawn(app.serve(url, hotshot_builder_api::v0_1::Version::instance())); } } @@ -367,7 +380,7 @@ impl BuilderTask for SimpleBuilderTask { mut self: Box, mut stream: Box> + std::marker::Unpin + Send + 'static>, ) { - async_spawn(async move { + spawn(async move { let mut should_build_blocks = true; loop { match stream.next().await { diff --git a/crates/testing/src/byzantine/byzantine_behaviour.rs b/crates/testing/src/byzantine/byzantine_behaviour.rs index 30233dd269..1ab38f1f29 100644 --- a/crates/testing/src/byzantine/byzantine_behaviour.rs +++ b/crates/testing/src/byzantine/byzantine_behaviour.rs @@ -1,5 
+1,5 @@ use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; @@ -18,7 +18,7 @@ use hotshot_task_impls::{ }, }; use hotshot_types::{ - consensus::Consensus, + consensus::{Consensus, OuterConsensus}, data::QuorumProposal, message::{Proposal, UpgradeLock}, simple_vote::QuorumVote, @@ -349,8 +349,9 @@ impl + std::fmt::Debug, V: Version quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), - consensus: Arc::clone(&handle.consensus()), + consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), + transmit_tasks: BTreeMap::new(), }; let modified_network_state = NetworkEventTaskStateModifier { network_event_task_state: network_state, diff --git a/crates/testing/src/completion_task.rs b/crates/testing/src/completion_task.rs index 711794d76b..5806f8d1c8 100644 --- a/crates/testing/src/completion_task.rs +++ b/crates/testing/src/completion_task.rs @@ -7,12 +7,8 @@ use std::time::Duration; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_spawn, async_timeout}; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use hotshot_task_impls::helpers::broadcast_event; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; +use tokio::{spawn, task::JoinHandle, time::timeout}; use crate::test_task::TestEvent; @@ -27,8 +23,8 @@ pub struct CompletionTask { impl CompletionTask { pub fn run(mut self) -> JoinHandle<()> { - async_spawn(async move { - if async_timeout(self.duration, self.wait_for_shutdown()) + spawn(async move { + if timeout(self.duration, self.wait_for_shutdown()) .await .is_err() { diff --git a/crates/testing/src/helpers.rs b/crates/testing/src/helpers.rs index 842ef3e538..40760372c6 100644 --- a/crates/testing/src/helpers.rs +++ b/crates/testing/src/helpers.rs @@ -92,7 +92,7 @@ pub async fn build_system_handle_from_launcher< let network = 
(launcher.resource_generator.channel_generator)(node_id).await; let storage = (launcher.resource_generator.storage)(node_id); let marketplace_config = (launcher.resource_generator.marketplace_config)(node_id); - let mut config = launcher.resource_generator.config.clone(); + let config = launcher.resource_generator.config.clone(); let initializer = HotShotInitializer::::from_genesis::(TestInstanceState::new( launcher.metadata.async_delay_config.clone(), @@ -104,11 +104,10 @@ pub async fn build_system_handle_from_launcher< let is_da = node_id < config.da_staked_committee_size as u64; // We assign node's public key and stake value rather than read from config file since it's a test - let validator_config = + let validator_config: ValidatorConfig = ValidatorConfig::generated_from_seed_indexed([0u8; 32], node_id, 1, is_da); - config.my_own_validator_config = validator_config; - let private_key = config.my_own_validator_config.private_key.clone(); - let public_key = config.my_own_validator_config.public_key.clone(); + let private_key = validator_config.private_key.clone(); + let public_key = validator_config.public_key.clone(); let all_nodes = config.known_nodes_with_stake.clone(); let da_nodes = config.known_da_nodes.clone(); diff --git a/crates/testing/src/lib.rs b/crates/testing/src/lib.rs index 43861fcbf2..20d513dc2a 100644 --- a/crates/testing/src/lib.rs +++ b/crates/testing/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr( // hotshot_example option is set manually in justfile when running examples - not(any(test, debug_assertions, hotshot_example)), + not(any(test, debug_assertions)), deprecated = "suspicious usage of testing/demo implementations in non-test/non-debug build" )] diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index 979c2c2a04..ba7136c90f 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -498,6 +498,7 @@ impl RoundResult { } if let Some((n_txn, _)) = 
self.num_txns_map.iter().last() { if *n_txn < transaction_threshold { + tracing::error!("not enough transactions for view {:?}", key.view_number()); self.status = ViewStatus::Failed; return; } diff --git a/crates/testing/src/predicates/event.rs b/crates/testing/src/predicates/event.rs index d8484b3bbc..b188338839 100644 --- a/crates/testing/src/predicates/event.rs +++ b/crates/testing/src/predicates/event.rs @@ -136,17 +136,6 @@ where Box::new(EventPredicate { check, info }) } -pub fn leaf_decided() -> Box> -where - TYPES: NodeType, -{ - let info = "LeafDecided".to_string(); - let check: EventCallback = - Arc::new(move |e: Arc>| matches!(e.as_ref(), LeafDecided(_))); - - Box::new(EventPredicate { check, info }) -} - pub fn quorum_vote_send() -> Box> where TYPES: NodeType, @@ -164,7 +153,7 @@ where { let info = "ViewChange".to_string(); let check: EventCallback = - Arc::new(move |e: Arc>| matches!(e.as_ref(), ViewChange(_))); + Arc::new(move |e: Arc>| matches!(e.as_ref(), ViewChange(_, _))); Box::new(EventPredicate { check, info }) } @@ -262,17 +251,6 @@ where Box::new(EventPredicate { check, info }) } -pub fn validated_state_updated() -> Box> -where - TYPES: NodeType, -{ - let info = "ValidatedStateUpdated".to_string(); - let check: EventCallback = Arc::new(move |e: Arc>| { - matches!(e.as_ref(), ValidatedStateUpdated(..)) - }); - Box::new(EventPredicate { check, info }) -} - pub fn vid_share_validated() -> Box> where TYPES: NodeType, @@ -304,24 +282,3 @@ where }); Box::new(EventPredicate { check, info }) } - -pub fn high_qc_updated() -> Box> -where - TYPES: NodeType, -{ - let info = "HighQcUpdated".to_string(); - let check: EventCallback = - Arc::new(move |e: Arc>| matches!(e.as_ref(), HighQcUpdated(..))); - Box::new(EventPredicate { check, info }) -} - -pub fn quorum_vote_dependencies_validated() -> Box> -where - TYPES: NodeType, -{ - let info = "QuorumVoteDependenciesValidated".to_string(); - let check: EventCallback = Arc::new(move |e: Arc>| { - 
matches!(e.as_ref(), QuorumVoteDependenciesValidated(..)) - }); - Box::new(EventPredicate { check, info }) -} diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index 5593a4336e..e2a299a171 100644 --- a/crates/testing/src/spinning_task.rs +++ b/crates/testing/src/spinning_task.rs @@ -156,6 +156,7 @@ where TestInstanceState::new(self.async_delay_config.clone()), None, TYPES::View::genesis(), + TYPES::Epoch::genesis(), TYPES::View::genesis(), BTreeMap::new(), self.high_qc.clone(), @@ -238,6 +239,7 @@ where TestInstanceState::new(self.async_delay_config.clone()), None, read_storage.last_actioned_view().await, + read_storage.last_actioned_epoch().await, read_storage.last_actioned_view().await, read_storage.proposals_cloned().await, read_storage.high_qc_cloned().await.unwrap_or( diff --git a/crates/testing/src/test_builder.rs b/crates/testing/src/test_builder.rs index 15cbb293ed..4773e597e0 100644 --- a/crates/testing/src/test_builder.rs +++ b/crates/testing/src/test_builder.rs @@ -98,6 +98,8 @@ pub struct TestDescription, V: Ver pub start_solver: bool, /// boxed closure used to validate the resulting transactions pub validate_transactions: TransactionValidator, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } pub fn nonempty_block_threshold(threshold: (u64, u64)) -> TransactionValidator { @@ -415,6 +417,7 @@ impl, V: Versions> Default upgrade_view: None, start_solver: true, validate_transactions: Arc::new(|_| Ok(())), + epoch_height: 0, } } } @@ -452,6 +455,7 @@ where timing_data, da_staked_committee_size, unreliable_network, + epoch_height, .. } = self.clone(); @@ -477,7 +481,7 @@ where }) .collect(); // But now to test validator's config, we input the info of my_own_validator from config file when node_id == 0. 
- let my_own_validator_config = ValidatorConfig::generated_from_seed_indexed( + let validator_config = ValidatorConfig::::generated_from_seed_indexed( [0u8; 32], node_id, 1, @@ -492,7 +496,6 @@ where known_da_nodes, num_bootstrap: num_bootstrap_nodes, known_nodes_with_stake, - my_own_validator_config, da_staked_committee_size, fixed_leader_for_gpuvid: 1, next_view_timeout: 500, @@ -509,7 +512,7 @@ where stop_proposing_time: 0, start_voting_time: u64::MAX, stop_voting_time: 0, - epoch_height: 0, + epoch_height, }; let TimingData { next_view_timeout, @@ -544,6 +547,7 @@ where storage }), config, + validator_config, marketplace_config: Box::new(|_| MarketplaceConfig:: { auction_results_provider: TestAuctionResultsProvider::::default().into(), fallback_builder_url: Url::parse("http://localhost:9999").unwrap(), diff --git a/crates/testing/src/test_launcher.rs b/crates/testing/src/test_launcher.rs index a6c5730e1c..8edffa33a8 100644 --- a/crates/testing/src/test_launcher.rs +++ b/crates/testing/src/test_launcher.rs @@ -16,7 +16,7 @@ use hotshot_types::{ network::{AsyncGenerator, ConnectedNetwork}, node_implementation::{NodeType, Versions}, }, - HotShotConfig, + HotShotConfig, ValidatorConfig, }; use super::{test_builder::TestDescription, test_runner::TestRunner}; @@ -36,6 +36,8 @@ pub struct ResourceGenerators>, /// configuration used to generate each hotshot node pub config: HotShotConfig, + /// config that contains the signature keys + pub validator_config: ValidatorConfig, /// generate a new marketplace config for each node pub marketplace_config: Generator>, } diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index cb2c8119ed..2a08fda5e4 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -12,10 +12,7 @@ use std::{ }; use async_broadcast::{broadcast, Receiver, Sender}; -use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use 
async_std::task::JoinHandle; use futures::future::join_all; use hotshot::{ traits::TestableNodeImplementation, @@ -43,8 +40,7 @@ use hotshot_types::{ HotShotConfig, ValidatorConfig, }; use tide_disco::Url; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; +use tokio::{spawn, task::JoinHandle}; #[allow(deprecated)] use tracing::info; @@ -280,53 +276,29 @@ where let mut error_list = vec![]; - #[cfg(async_executor_impl = "async-std")] - { - let results = join_all(task_futs).await; - for result in results { - match result { + let results = join_all(task_futs).await; + + for result in results { + match result { + Ok(res) => match res { TestResult::Pass => { info!("Task shut down successfully"); } TestResult::Fail(e) => error_list.push(e), + }, + Err(e) => { + tracing::error!("Error Joining the test task {:?}", e); } } - if let Some(handle) = txn_handle { - handle.cancel().await; - } - // Shutdown all of the servers at the end - // Aborting here doesn't cause any problems because we don't maintain any state - if let Some(solver_server) = solver_server { - solver_server.1.cancel().await; - } } - #[cfg(async_executor_impl = "tokio")] - { - let results = join_all(task_futs).await; - - for result in results { - match result { - Ok(res) => match res { - TestResult::Pass => { - info!("Task shut down successfully"); - } - TestResult::Fail(e) => error_list.push(e), - }, - Err(e) => { - tracing::error!("Error Joining the test task {:?}", e); - } - } - } - - if let Some(handle) = txn_handle { - handle.abort(); - } - // Shutdown all of the servers at the end - // Aborting here doesn't cause any problems because we don't maintain any state - if let Some(solver_server) = solver_server { - solver_server.1.abort(); - } + if let Some(handle) = txn_handle { + handle.abort(); + } + // Shutdown all of the servers at the end + // Aborting here doesn't cause any problems because we don't maintain any state + if let Some(solver_server) = solver_server { + 
solver_server.1.abort(); } let mut nodes = handles.write().await; @@ -336,9 +308,6 @@ where } tracing::info!("Nodes shtudown"); - #[cfg(async_executor_impl = "async-std")] - completion_handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] completion_handle.abort(); assert!( @@ -406,7 +375,7 @@ where // Then, fire it up as a background thread. self.solver_server = Some(( solver_url.clone(), - async_spawn(async move { + spawn(async move { solver_state .run::(solver_url) .await diff --git a/crates/testing/src/test_task.rs b/crates/testing/src/test_task.rs index 8d685914d3..f828a19737 100644 --- a/crates/testing/src/test_task.rs +++ b/crates/testing/src/test_task.rs @@ -4,14 +4,11 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use std::{sync::Arc, time::Duration}; +use std::{num::NonZeroUsize, sync::Arc, time::Duration}; use anyhow::Result; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::{spawn, JoinHandle}; use async_trait::async_trait; use futures::future::select_all; use hotshot::{ @@ -26,8 +23,11 @@ use hotshot_types::{ node_implementation::{NodeType, Versions}, }, }; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::{spawn, JoinHandle}; +use tokio::{ + spawn, + task::JoinHandle, + time::{sleep, timeout}, +}; use tracing::error; use crate::test_runner::Node; @@ -134,7 +134,7 @@ impl TestTask { messages.push(receiver.recv()); } - match async_timeout(Duration::from_millis(2500), select_all(messages)).await { + match timeout(Duration::from_millis(2500), select_all(messages)).await { Ok((Ok(input), id, _)) => { let _ = S::handle_event(&mut self.state, (input, id)) .await @@ -142,7 +142,7 @@ impl TestTask { } Ok((Err(e), _id, _)) => { error!("Error from one channel in test task {:?}", e); - 
async_sleep(Duration::from_millis(4000)).await; + sleep(Duration::from_millis(4000)).await; } _ => {} }; @@ -168,12 +168,13 @@ pub async fn add_network_message_test_task< internal_event_stream: internal_event_stream.clone(), external_event_stream: external_event_stream.clone(), public_key, + transactions_cache: lru::LruCache::new(NonZeroUsize::new(100_000).unwrap()), }; let network = Arc::clone(&net); let mut state = network_state.clone(); - async_spawn(async move { + spawn(async move { loop { // Get the next message from the network let message = match network.recv_message().await { diff --git a/crates/testing/src/txn_task.rs b/crates/testing/src/txn_task.rs index 8f363847d0..41b5ec3b14 100644 --- a/crates/testing/src/txn_task.rs +++ b/crates/testing/src/txn_task.rs @@ -7,15 +7,11 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::Receiver; -use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; use hotshot_types::traits::node_implementation::{NodeType, Versions}; use rand::thread_rng; -#[cfg(async_executor_impl = "tokio")] -use tokio::task::JoinHandle; +use tokio::{spawn, task::JoinHandle, time::sleep}; use crate::{test_runner::Node, test_task::TestEvent}; @@ -37,10 +33,9 @@ pub struct TxnTask, V: Ver impl, V: Versions> TxnTask { pub fn run(mut self) -> JoinHandle<()> { - async_spawn(async move { - async_sleep(Duration::from_millis(100)).await; + spawn(async move { loop { - async_sleep(self.duration).await; + sleep(self.duration).await; if let Ok(TestEvent::Shutdown) = self.shutdown_chan.try_recv() { break; } diff --git a/crates/testing/tests/tests_1/block_builder.rs b/crates/testing/tests/tests_1/block_builder.rs index f63560078f..5b0a6cf5c2 100644 --- a/crates/testing/tests/tests_1/block_builder.rs +++ b/crates/testing/tests/tests_1/block_builder.rs @@ -9,7 +9,6 @@ use std::{ time::{Duration, 
Instant}, }; -use async_compatibility_layer::art::async_sleep; use hotshot_builder_api::v0_1::block_info::AvailableBlockData; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, @@ -29,13 +28,10 @@ use hotshot_types::{ }, }; use tide_disco::Url; +use tokio::time::sleep; #[cfg(test)] -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[ignore] async fn test_random_block_builder() { let port = portpicker::pick_unused_port().expect("No free ports"); @@ -81,7 +77,7 @@ async fn test_random_block_builder() { }; // Wait for at least one block to be built - async_sleep(Duration::from_millis(20)).await; + sleep(Duration::from_millis(20)).await; if builder_started.elapsed() > Duration::from_secs(2) { panic!("Builder failed to provide blocks in two seconds"); diff --git a/crates/testing/tests/tests_1/da_task.rs b/crates/testing/tests/tests_1/da_task.rs index ad417a0431..96a7de771d 100644 --- a/crates/testing/tests/tests_1/da_task.rs +++ b/crates/testing/tests/tests_1/da_task.rs @@ -32,11 +32,9 @@ use hotshot_types::{ }; use vbs::version::StaticVersionType; -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_da_task() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await @@ -91,8 +89,8 @@ async fn test_da_task() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1)), - ViewChange(ViewNumber::new(2)), + ViewChange(ViewNumber::new(1), EpochNumber::new(1)), + ViewChange(ViewNumber::new(2), EpochNumber::new(1)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), 
TestMetadata { @@ -101,7 +99,8 @@ async fn test_da_task() { ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], Some(precompute), @@ -130,11 +129,9 @@ async fn test_da_task() { run_test![inputs, da_script].await; } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_da_task_storage_failure() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await @@ -192,8 +189,8 @@ async fn test_da_task_storage_failure() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1)), - ViewChange(ViewNumber::new(2)), + ViewChange(ViewNumber::new(1), EpochNumber::new(1)), + ViewChange(ViewNumber::new(2), EpochNumber::new(1)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { @@ -202,7 +199,8 @@ async fn test_da_task_storage_failure() { ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], Some(precompute), diff --git a/crates/testing/tests/tests_1/libp2p.rs b/crates/testing/tests/tests_1/libp2p.rs index f19e3b0798..c9f7ae33db 100644 --- a/crates/testing/tests/tests_1/libp2p.rs +++ b/crates/testing/tests/tests_1/libp2p.rs @@ -17,12 +17,12 @@ use hotshot_testing::{ use tracing::instrument; /// libp2p network test -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn libp2p_network() { - async_compatibility_layer::logging::setup_logging(); - 
async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -48,12 +48,12 @@ async fn libp2p_network() { } /// libp2p network test with failures -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn libp2p_network_failures_2() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -95,13 +95,13 @@ async fn libp2p_network_failures_2() { } /// stress test for libp2p -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] #[ignore] async fn test_stress_libp2p_network() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription::default_stress(); metadata diff --git a/crates/testing/tests/tests_1/network_task.rs b/crates/testing/tests/tests_1/network_task.rs index 08e4d7f88e..9a539f89e4 100644 --- a/crates/testing/tests/tests_1/network_task.rs +++ b/crates/testing/tests/tests_1/network_task.rs @@ -7,7 +7,6 @@ use std::{sync::Arc, time::Duration}; use async_broadcast::Sender; -use async_compatibility_layer::art::async_timeout; use async_lock::RwLock; use hotshot::traits::implementations::MemoryNetwork; use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; @@ -18,6 +17,7 @@ use 
hotshot_testing::{ test_task::add_network_message_test_task, view_generator::TestViewGenerator, }; use hotshot_types::{ + consensus::OuterConsensus, data::{EpochNumber, ViewNumber}, message::UpgradeLock, traits::{ @@ -25,19 +25,20 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, }, }; +use tokio::time::timeout; // Test that the event task sends a message, and the message task receives it // and emits the proper event #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[allow(clippy::too_many_lines)] async fn test_network_task() { + use std::collections::BTreeMap; + use futures::StreamExt; use hotshot_types::traits::network::Topic; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let builder: TestDescription = TestDescription::default_multiple_rounds(); @@ -51,9 +52,10 @@ async fn test_network_task() { let network = (launcher.resource_generator.channel_generator)(node_id).await; let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); - let consensus = handle.hotshot.consensus(); + let consensus = OuterConsensus::new(handle.hotshot.consensus()); let config = launcher.resource_generator.config.clone(); - let public_key = config.my_own_validator_config.public_key; + let validator_config = launcher.resource_generator.validator_config.clone(); + let public_key = validator_config.public_key; let all_nodes = config.known_nodes_with_stake.clone(); @@ -69,6 +71,7 @@ async fn test_network_task() { upgrade_lock: upgrade_lock.clone(), storage, consensus, + transmit_tasks: BTreeMap::new(), }; let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); @@ -97,7 +100,7 @@ async fn test_network_task() { .await .unwrap(); let res: Arc> = - 
async_timeout(Duration::from_millis(100), out_rx_internal.recv_direct()) + timeout(Duration::from_millis(100), out_rx_internal.recv_direct()) .await .expect("timed out waiting for response") .expect("channel closed"); @@ -108,15 +111,13 @@ async fn test_network_task() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_network_external_mnessages() { use hotshot::types::EventType; use hotshot_testing::helpers::build_system_handle_from_launcher; use hotshot_types::message::RecipientList; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let builder: TestDescription = TestDescription::default_multiple_rounds(); @@ -141,14 +142,11 @@ async fn test_network_external_mnessages() { .send_external_message(vec![1, 2], RecipientList::Direct(handles[2].public_key())) .await .unwrap(); - let event = async_compatibility_layer::art::async_timeout( - Duration::from_millis(100), - event_streams[2].recv(), - ) - .await - .unwrap() - .unwrap() - .event; + let event = tokio::time::timeout(Duration::from_millis(100), event_streams[2].recv()) + .await + .unwrap() + .unwrap() + .event; // check that 2 received the message assert!(matches!( @@ -164,14 +162,11 @@ async fn test_network_external_mnessages() { .send_external_message(vec![2, 1], RecipientList::Direct(handles[1].public_key())) .await .unwrap(); - let event = async_compatibility_layer::art::async_timeout( - Duration::from_millis(100), - event_streams[1].recv(), - ) - .await - .unwrap() - .unwrap() - .event; + let event = tokio::time::timeout(Duration::from_millis(100), event_streams[1].recv()) + .await + .unwrap() + .unwrap() + .event; // check that 1 received the message assert!(matches!( @@ -189,14 +184,11 @@ async fn test_network_external_mnessages() { 
.unwrap(); // All other nodes get the broadcast for stream in event_streams.iter_mut().skip(1) { - let event = async_compatibility_layer::art::async_timeout( - Duration::from_millis(100), - stream.recv(), - ) - .await - .unwrap() - .unwrap() - .event; + let event = tokio::time::timeout(Duration::from_millis(100), stream.recv()) + .await + .unwrap() + .unwrap() + .event; assert!(matches!( event, EventType::ExternalMessageReceived { @@ -206,19 +198,19 @@ async fn test_network_external_mnessages() { )); } // No event on 0 even after short sleep - async_compatibility_layer::art::async_sleep(Duration::from_millis(2)).await; + tokio::time::sleep(Duration::from_millis(2)).await; assert!(event_streams[0].is_empty()); } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_network_storage_fail() { + use std::collections::BTreeMap; + use futures::StreamExt; use hotshot_types::traits::network::Topic; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let builder: TestDescription = TestDescription::default_multiple_rounds(); @@ -230,11 +222,12 @@ async fn test_network_storage_fail() { let network = (launcher.resource_generator.channel_generator)(node_id).await; - let consensus = handle.hotshot.consensus(); + let consensus = OuterConsensus::new(handle.hotshot.consensus()); let storage = Arc::new(RwLock::new((launcher.resource_generator.storage)(node_id))); storage.write().await.should_return_err = true; let config = launcher.resource_generator.config.clone(); - let public_key = config.my_own_validator_config.public_key; + let validator_config = launcher.resource_generator.validator_config.clone(); + let public_key = validator_config.public_key; let all_nodes = config.known_nodes_with_stake.clone(); let upgrade_lock = 
UpgradeLock::::new(); @@ -250,6 +243,7 @@ async fn test_network_storage_fail() { upgrade_lock: upgrade_lock.clone(), storage, consensus, + transmit_tasks: BTreeMap::new(), }; let (tx, rx) = async_broadcast::broadcast(10); let mut task_reg = ConsensusTaskRegistry::new(); @@ -278,6 +272,6 @@ async fn test_network_storage_fail() { ))) .await .unwrap(); - let res = async_timeout(Duration::from_millis(100), out_rx_internal.recv_direct()).await; + let res = timeout(Duration::from_millis(100), out_rx_internal.recv_direct()).await; assert!(res.is_err()); } diff --git a/crates/testing/tests/tests_1/quorum_proposal_recv_task.rs b/crates/testing/tests/tests_1/quorum_proposal_recv_task.rs index d5a83a1953..dc42dfc723 100644 --- a/crates/testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/crates/testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -7,6 +7,8 @@ // TODO: Remove after integration #![allow(unused_imports)] +use std::sync::Arc; + use committable::Committable; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; @@ -25,8 +27,9 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::ViewNumber, + data::{Leaf, ViewNumber}, request_response::ProposalRequestPayload, traits::{ consensus_api::ConsensusApi, @@ -37,8 +40,7 @@ use hotshot_types::{ }; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_recv_task() { use std::time::Duration; @@ -46,10 +48,8 @@ async fn test_quorum_proposal_recv_task() { helpers::build_fake_view_with_leaf, script::{Expectations, TaskScript}, }; - use hotshot_types::data::Leaf; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = 
build_system_handle::(2) .await @@ -74,19 +74,16 @@ async fn test_quorum_proposal_recv_task() { vids.push(view.vid_proposal.clone()); leaves.push(view.leaf.clone()); - // These are both updated when we vote. Since we don't have access + // This is updated when we vote. Since we don't have access // to that, we'll just put them in here. consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number, - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } drop(consensus_writer); @@ -98,23 +95,11 @@ async fn test_quorum_proposal_recv_task() { let expectations = vec![Expectations::from_outputs(vec![ exact(QuorumProposalPreliminarilyValidated(proposals[1].clone())), - exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), - exact(ValidatedStateUpdated( - ViewNumber::new(2), - build_fake_view_with_leaf_and_state( - leaves[1].clone(), - >::from_header( - &proposals[1].data.block_header, - ), - &handle.hotshot.upgrade_lock, - ) - .await, - )), exact(QuorumProposalValidated( proposals[1].clone(), leaves[0].clone(), )), - exact(ViewChange(ViewNumber::new(2))), + exact(ViewChange(ViewNumber::new(2), EpochNumber::new(0))), ])]; let state = @@ -129,8 +114,7 @@ async fn test_quorum_proposal_recv_task() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_recv_task_liveness_check() { use std::time::Duration; @@ -143,8 +127,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { }; use hotshot_types::{data::Leaf, vote::HasViewNumber}; - async_compatibility_layer::logging::setup_logging(); - 
async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(4) .await @@ -173,17 +156,6 @@ async fn test_quorum_proposal_recv_task_liveness_check() { // there's no reason not to. let inserted_view_number = view.quorum_proposal.data.view_number(); - // These are both updated when we'd have voted previously. However, since - // we don't have access to that, we'll just put them in here. We - // specifically ignore writing the saved leaves so that way - // the parent lookup fails and we trigger a view liveness check. - consensus_writer - .update_validated_state_map( - inserted_view_number, - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) - .unwrap(); - // The index here is important. Since we're proposing for view 4, we need the // value from entry 2 to align the public key from the shares map. consensus_writer.update_vid_shares(inserted_view_number, view.vid_proposal.0[2].clone()); @@ -221,20 +193,8 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let expectations = vec![Expectations::from_outputs(all_predicates![ exact(QuorumProposalPreliminarilyValidated(proposals[2].clone())), - exact(ViewChange(ViewNumber::new(3))), - exact(ValidatedStateUpdated( - ViewNumber::new(3), - build_fake_view_with_leaf_and_state( - leaves[2].clone(), - >::from_header( - &proposals[2].data.block_header, - ), - &handle.hotshot.upgrade_lock - ) - .await, - )), + exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), exact(QuorumProposalRequestSend(req, signature)), - exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), ])]; let state = diff --git a/crates/testing/tests/tests_1/quorum_proposal_task.rs b/crates/testing/tests/tests_1/quorum_proposal_task.rs index ae5fa54fdc..d8ae3e2b2b 100644 --- a/crates/testing/tests/tests_1/quorum_proposal_task.rs +++ b/crates/testing/tests/tests_1/quorum_proposal_task.rs @@ -4,10 +4,10 @@ // You should have received a copy 
of the MIT License // along with the HotShot repository. If not, see . -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; -use hotshot::{tasks::task_state::CreateTaskState, traits::ValidatedState}; +use hotshot::tasks::task_state::CreateTaskState; use hotshot_example_types::{ block_types::TestMetadata, node_types::{MemoryImpl, TestTypes, TestVersions}, @@ -17,8 +17,8 @@ use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{events::HotShotEvent::*, quorum_proposal::QuorumProposalTaskState}; use hotshot_testing::{ all_predicates, - helpers::{build_fake_view_with_leaf, build_payload_commitment, build_system_handle}, - predicates::event::{all_predicates, exact, quorum_proposal_send}, + helpers::{build_payload_commitment, build_system_handle}, + predicates::event::{all_predicates, quorum_proposal_send}, random, script::{Expectations, InputOrder, TaskScript}, serial, @@ -32,7 +32,6 @@ use hotshot_types::{ node_implementation::{ConsensusTime, Versions}, }, utils::BuilderCommitment, - vote::HasViewNumber, }; use sha2::Digest; use vec1::vec1; @@ -40,14 +39,12 @@ use vec1::vec1; const TIMEOUT: Duration = Duration::from_millis(35); #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_quorum_proposal_view_1() { use hotshot_testing::script::{Expectations, TaskScript}; use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 1; let handle = build_system_handle::(node_id) @@ -81,11 +78,14 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. 
consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; + .await + .unwrap(); } // We must send the genesis cert here to initialize hotshot successfully. @@ -94,6 +94,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let builder_fee = null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, + *ViewNumber::new(1), ) .unwrap(); drop(consensus_writer); @@ -115,20 +116,12 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { vec1![builder_fee.clone()], None, ), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ], ]; let expectations = vec![ Expectations::from_outputs(vec![]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(genesis_cert.clone())), - exact(HighQcUpdated(genesis_cert.clone())), - quorum_proposal_send(), - ]), + Expectations::from_outputs(all_predicates![quorum_proposal_send(),]), ]; let quorum_proposal_task_state = @@ -143,13 +136,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 3; let handle = build_system_handle::(node_id) @@ -177,28 +168,19 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during 
tests. consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. - - let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); - let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; drop(consensus_writer); @@ -206,6 +188,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let builder_fee = null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, + *ViewNumber::new(1), ) .unwrap(); @@ -227,10 +210,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), - ValidatedStateUpdated( - genesis_cert.view_number(), - build_fake_view_with_leaf(genesis_leaf.clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), @@ -248,10 +227,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), @@ -269,10 +244,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), - ValidatedStateUpdated( - 
proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[2].clone()), @@ -290,10 +261,6 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[2].data.view_number(), - build_fake_view_with_leaf(leaves[2].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[3].clone()), @@ -311,35 +278,15 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { None, ), VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[3].data.view_number(), - build_fake_view_with_leaf(leaves[3].clone(), &handle.hotshot.upgrade_lock).await, - ), ], ]; let expectations = vec![ - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(genesis_cert.clone())), - exact(HighQcUpdated(genesis_cert.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), - quorum_proposal_send(), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[3].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[3].data.justify_qc.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[4].data.justify_qc.clone())), - ]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(all_predicates![quorum_proposal_send(),]), + Expectations::from_outputs(vec![]), + 
Expectations::from_outputs(vec![]), ]; let quorum_proposal_task_state = @@ -355,13 +302,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_qc_timeout() { use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 3; let handle = build_system_handle::(node_id) @@ -420,16 +365,13 @@ async fn test_quorum_proposal_task_qc_timeout() { ViewNumber::new(3), vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(3), ) .unwrap()], None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, - ), ]]; let expectations = vec![Expectations::from_outputs(vec![quorum_proposal_send()])]; @@ -446,15 +388,13 @@ async fn test_quorum_proposal_task_qc_timeout() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_view_sync() { use hotshot_example_types::block_types::TestMetadata; use hotshot_types::data::null_block; use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 2; let handle = build_system_handle::(node_id) @@ -515,16 +455,13 @@ async fn test_quorum_proposal_task_view_sync() { ViewNumber::new(2), 
vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ]]; let expectations = vec![Expectations::from_outputs(vec![quorum_proposal_send()])]; @@ -541,13 +478,11 @@ async fn test_quorum_proposal_task_view_sync() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_liveness_check() { use vbs::version::StaticVersionType; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let node_id = 3; let handle = build_system_handle::(node_id) @@ -575,16 +510,13 @@ async fn test_quorum_proposal_task_liveness_check() { // We don't have a `QuorumProposalRecv` task handler, so we'll just manually insert the proposals // to make sure they show up during tests. 
consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } drop(consensus_writer); @@ -593,17 +525,13 @@ async fn test_quorum_proposal_task_liveness_check() { let builder_fee = null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, + *ViewNumber::new(1), ) .unwrap(); // We need to handle the views where we aren't the leader to ensure that the states are // updated properly. - - let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); - let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let inputs = vec![ random![ @@ -623,10 +551,6 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), - ValidatedStateUpdated( - genesis_cert.view_number(), - build_fake_view_with_leaf(genesis_leaf.clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), @@ -644,10 +568,6 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[1].clone()), @@ -665,10 +585,6 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[1].data.view_number(), - 
build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[2].clone()), @@ -686,10 +602,6 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[3].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[2].data.view_number(), - build_fake_view_with_leaf(leaves[2].clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[3].clone()), @@ -707,35 +619,15 @@ async fn test_quorum_proposal_task_liveness_check() { None, ), VidDisperseSend(vid_dispersals[4].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[3].data.view_number(), - build_fake_view_with_leaf(leaves[3].clone(), &handle.hotshot.upgrade_lock).await, - ), ], ]; let expectations = vec![ - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(genesis_cert.clone())), - exact(HighQcUpdated(genesis_cert.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), - quorum_proposal_send(), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[3].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[3].data.justify_qc.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[4].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[4].data.justify_qc.clone())), - ]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(all_predicates![quorum_proposal_send(),]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), ]; let quorum_proposal_task_state = @@ -750,11 
+642,9 @@ async fn test_quorum_proposal_task_liveness_check() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_proposal_task_with_incomplete_events() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await diff --git a/crates/testing/tests/tests_1/quorum_vote_task.rs b/crates/testing/tests/tests_1/quorum_vote_task.rs index 3030a1aea2..b4350abe3c 100644 --- a/crates/testing/tests/tests_1/quorum_vote_task.rs +++ b/crates/testing/tests/tests_1/quorum_vote_task.rs @@ -6,38 +6,40 @@ #![allow(clippy::panic)] -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; use hotshot::tasks::task_state::CreateTaskState; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; +use hotshot_example_types::{ + node_types::{MemoryImpl, TestTypes, TestVersions}, + state_types::TestValidatedState, +}; use hotshot_macros::{run_test, test_scripts}; use hotshot_testing::{ all_predicates, - helpers::{build_fake_view_with_leaf, vid_share}, + helpers::vid_share, predicates::event::all_predicates, random, script::{Expectations, InputOrder, TaskScript}, }; use hotshot_types::{ - data::ViewNumber, traits::node_implementation::ConsensusTime, vote::HasViewNumber, + data::{EpochNumber, Leaf, ViewNumber}, + traits::node_implementation::ConsensusTime, }; const TIMEOUT: Duration = Duration::from_millis(35); #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_vote_task_success() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; 
use hotshot_testing::{ helpers::build_system_handle, - predicates::event::{exact, quorum_vote_send, validated_state_updated}, + predicates::event::{exact, quorum_vote_send}, view_generator::TestViewGenerator, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await @@ -61,14 +63,14 @@ async fn test_quorum_vote_task_success() { dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, + .update_leaf( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); - consensus_writer - .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) - .await; } drop(consensus_writer); @@ -83,8 +85,7 @@ async fn test_quorum_vote_task_success() { let expectations = vec![Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), - validated_state_updated(), + exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), quorum_vote_send(), ])]; @@ -100,16 +101,14 @@ async fn test_quorum_vote_task_success() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_vote_task_miss_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ helpers::build_system_handle, predicates::event::exact, view_generator::TestViewGenerator, }; - 
async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await @@ -136,14 +135,14 @@ async fn test_quorum_vote_task_miss_dependency() { leaves.push(view.leaf.clone()); consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, + .update_leaf( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); - consensus_writer - .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) - .await; } drop(consensus_writer); @@ -167,10 +166,9 @@ async fn test_quorum_vote_task_miss_dependency() { Expectations::from_outputs(all_predicates![exact(VidShareValidated( vids[1].0[0].clone() ))]), - Expectations::from_outputs(all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(1))), - exact(DaCertificateValidated(dacs[2].clone())) - ]), + Expectations::from_outputs(all_predicates![exact(DaCertificateValidated( + dacs[2].clone() + ))]), Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[3].clone())), exact(VidShareValidated(vids[3].0[0].clone())), @@ -189,16 +187,14 @@ async fn test_quorum_vote_task_miss_dependency() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_quorum_vote_task_incorrect_dependency() { use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::QuorumVoteTaskState}; use hotshot_testing::{ helpers::build_system_handle, predicates::event::exact, view_generator::TestViewGenerator, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + 
hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await diff --git a/crates/testing/tests/tests_1/test_success.rs b/crates/testing/tests/tests_1/test_success.rs index 853f823278..588c718a83 100644 --- a/crates/testing/tests/tests_1/test_success.rs +++ b/crates/testing/tests/tests_1/test_success.rs @@ -8,8 +8,8 @@ use std::time::Duration; use hotshot_example_types::{ node_types::{ - Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, TestTypes, - TestTypesRandomizedLeader, TestVersions, + EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, TestConsecutiveLeaderTypes, + TestTypes, TestTypesRandomizedLeader, TestVersions, }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -132,3 +132,22 @@ cross_tests!( metadata } ); + +cross_tests!( + TestName: test_epoch_end, + Impls: [MemoryImpl], + Types: [TestTypes], + Versions: [EpochsTestVersions], + Ignore: false, + Metadata: { + TestDescription { + completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( + TimeBasedCompletionTaskDescription { + duration: Duration::from_millis(100000), + }, + ), + epoch_height: 10, + ..TestDescription::default() + } + }, +); diff --git a/crates/testing/tests/tests_1/transaction_task.rs b/crates/testing/tests/tests_1/transaction_task.rs index 04cd0d528e..0a4877fcc1 100644 --- a/crates/testing/tests/tests_1/transaction_task.rs +++ b/crates/testing/tests/tests_1/transaction_task.rs @@ -18,11 +18,9 @@ use hotshot_types::{ use vbs::version::StaticVersionType; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_transaction_task_leader_two_views_in_a_row() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); // 
Build the API for node 2. let node_id = 2; @@ -35,7 +33,11 @@ async fn test_transaction_task_leader_two_views_in_a_row() { let mut output = Vec::new(); let current_view = ViewNumber::new(4); - input.push(HotShotEvent::ViewChange(current_view)); + input.push(HotShotEvent::ViewChange(current_view, EpochNumber::new(1))); + input.push(HotShotEvent::ViewChange( + current_view + 1, + EpochNumber::new(1), + )); input.push(HotShotEvent::Shutdown); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); @@ -52,7 +54,8 @@ async fn test_transaction_task_leader_two_views_in_a_row() { vec1::vec1![ null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(4), ) .unwrap() ], diff --git a/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs b/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs index b82804f4f8..ac4b338f37 100644 --- a/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -46,19 +46,14 @@ use vec1::vec1; const TIMEOUT: Duration = Duration::from_millis(35); -#[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) -)] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] /// Test that we correctly form and include an `UpgradeCertificate` when receiving votes. 
async fn test_upgrade_task_with_proposal() { use std::sync::Arc; use hotshot_testing::helpers::build_system_handle; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(3) .await @@ -100,16 +95,13 @@ async fn test_upgrade_task_with_proposal() { leaders.push(view.leader_public_key); views.push(view.clone()); consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } @@ -124,29 +116,23 @@ async fn test_upgrade_task_with_proposal() { leaves.push(view.leaf.clone()); views.push(view.clone()); consensus_writer - .update_saved_leaves( + .update_leaf( Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, &handle.hotshot.upgrade_lock, ) - .await; - consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, - ) + .await .unwrap(); } drop(consensus_writer); - let (validated_state, _ /* state delta */) = >::genesis(&*handle.hotshot.instance_state()); let genesis_cert = proposals[0].data.justify_qc.clone(); - let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, + *ViewNumber::new(1), ) .unwrap(); @@ -185,10 +171,6 @@ async fn test_upgrade_task_with_proposal() { None, ), 
VidDisperseSend(vid_dispersals[0].clone(), handle.public_key()), - ValidatedStateUpdated( - genesis_cert.view_number(), - build_fake_view_with_leaf(genesis_leaf.clone(), &handle.hotshot.upgrade_lock).await, - ), ], random![ QuorumProposalPreliminarilyValidated(proposals[0].clone()), @@ -206,10 +188,6 @@ async fn test_upgrade_task_with_proposal() { None, ), VidDisperseSend(vid_dispersals[1].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[0].data.view_number(), - build_fake_view_with_leaf(leaves[0].clone(), &handle.hotshot.upgrade_lock).await, - ), ], InputOrder::Random(upgrade_vote_recvs), random![ @@ -228,10 +206,6 @@ async fn test_upgrade_task_with_proposal() { None, ), VidDisperseSend(vid_dispersals[2].clone(), handle.public_key()), - ValidatedStateUpdated( - proposals[1].data.view_number(), - build_fake_view_with_leaf(leaves[1].clone(), &handle.hotshot.upgrade_lock).await, - ), ], ]; @@ -239,18 +213,10 @@ async fn test_upgrade_task_with_proposal() { timeout: TIMEOUT, state: proposal_state, expectations: vec![ - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(genesis_cert.clone())), - exact(HighQcUpdated(genesis_cert.clone())), - ]), - Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[1].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[1].data.justify_qc.clone())), - ]), + Expectations::from_outputs(vec![]), + Expectations::from_outputs(vec![]), Expectations::from_outputs(vec![]), Expectations::from_outputs(all_predicates![ - exact(UpdateHighQc(proposals[2].data.justify_qc.clone())), - exact(HighQcUpdated(proposals[2].data.justify_qc.clone())), quorum_proposal_send_with_upgrade_certificate::() ]), ], diff --git a/crates/testing/tests/tests_1/upgrade_task_with_vote.rs b/crates/testing/tests/tests_1/upgrade_task_with_vote.rs index 212ca114bb..72790bec4d 100644 --- a/crates/testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/crates/testing/tests/tests_1/upgrade_task_with_vote.rs @@ -6,14 +6,14 
@@ #![allow(unused_imports)] -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; use hotshot::{tasks::task_state::CreateTaskState, types::SystemContextHandle}; use hotshot_example_types::{ block_types::{TestMetadata, TestTransaction}, node_types::{MemoryImpl, TestTypes, TestVersions}, - state_types::TestInstanceState, + state_types::{TestInstanceState, TestValidatedState}, }; use hotshot_macros::{run_test, test_scripts}; use hotshot_task_impls::{ @@ -29,24 +29,21 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, ViewNumber}, + data::{null_block, EpochNumber, Leaf, ViewNumber}, simple_vote::UpgradeProposalData, traits::{election::Membership, node_implementation::ConsensusTime}, vote::HasViewNumber, }; use vbs::version::Version; - const TIMEOUT: Duration = Duration::from_millis(65); -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] /// Tests that we correctly update our internal quorum vote state when reaching a decided upgrade /// certificate. 
async fn test_upgrade_task_with_vote() { use hotshot_testing::helpers::build_system_handle; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); let handle = build_system_handle::(2) .await @@ -84,14 +81,14 @@ async fn test_upgrade_task_with_vote() { leaders.push(view.leader_public_key); leaves.push(view.leaf.clone()); consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, + .update_leaf( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); - consensus_writer - .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) - .await; } drop(consensus_writer); @@ -137,55 +134,37 @@ async fn test_upgrade_task_with_vote() { Expectations::from_outputs(all_predicates![ exact(DaCertificateValidated(dacs[1].clone())), exact(VidShareValidated(vids[1].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), - validated_state_updated(), + exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), quorum_vote_send(), ]), Expectations::from_outputs_and_task_states( all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(1))), exact(DaCertificateValidated(dacs[2].clone())), exact(VidShareValidated(vids[2].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(3))), - validated_state_updated(), + exact(ViewChange(ViewNumber::new(4), EpochNumber::new(0))), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], ), Expectations::from_outputs_and_task_states( all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(2))), - exact(LastDecidedViewUpdated(ViewNumber::new(1))), - leaf_decided(), exact(DaCertificateValidated(dacs[3].clone())), exact(VidShareValidated(vids[3].0[0].clone())), - 
exact(QuorumVoteDependenciesValidated(ViewNumber::new(4))), - validated_state_updated(), + exact(ViewChange(ViewNumber::new(5), EpochNumber::new(0))), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], ), Expectations::from_outputs_and_task_states( all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(3))), - exact(LastDecidedViewUpdated(ViewNumber::new(2))), - leaf_decided(), exact(DaCertificateValidated(dacs[4].clone())), exact(VidShareValidated(vids[4].0[0].clone())), - exact(QuorumVoteDependenciesValidated(ViewNumber::new(5))), - validated_state_updated(), + exact(ViewChange(ViewNumber::new(6), EpochNumber::new(0))), quorum_vote_send(), ], vec![no_decided_upgrade_certificate()], ), - Expectations::from_outputs_and_task_states( - all_predicates![ - exact(LockedViewUpdated(ViewNumber::new(4))), - exact(LastDecidedViewUpdated(ViewNumber::new(3))), - leaf_decided(), - ], - vec![decided_upgrade_certificate()], - ), + Expectations::from_outputs_and_task_states(vec![], vec![decided_upgrade_certificate()]), ]; let vote_state = diff --git a/crates/testing/tests/tests_1/vid_task.rs b/crates/testing/tests/tests_1/vid_task.rs index c2ca9aec09..8d8daf4c5e 100644 --- a/crates/testing/tests/tests_1/vid_task.rs +++ b/crates/testing/tests/tests_1/vid_task.rs @@ -33,13 +33,11 @@ use jf_vid::{precomputable::Precomputable, VidScheme}; use vbs::version::StaticVersionType; use vec1::vec1; -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_vid_task() { use hotshot_types::message::Proposal; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); // Build the API for node 2. 
let handle = build_system_handle::(2) @@ -102,9 +100,9 @@ async fn test_vid_task() { _pd: PhantomData, }; let inputs = vec![ - serial![ViewChange(ViewNumber::new(1))], + serial![ViewChange(ViewNumber::new(1), EpochNumber::new(1))], serial![ - ViewChange(ViewNumber::new(2)), + ViewChange(ViewNumber::new(2), EpochNumber::new(1)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { @@ -113,7 +111,8 @@ async fn test_vid_task() { ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], Some(vid_precompute), @@ -134,12 +133,12 @@ async fn test_vid_task() { ViewNumber::new(2), vec1![null_block::builder_fee::( quorum_membership.total_nodes(EpochNumber::new(0)), - ::Base::VERSION + ::Base::VERSION, + *ViewNumber::new(2), ) .unwrap()], None, )), - exact(BlockReady(vid_disperse, ViewNumber::new(2))), exact(VidDisperseSend(vid_proposal.clone(), pub_key)), ]), ]; diff --git a/crates/testing/tests/tests_1/view_sync_task.rs b/crates/testing/tests/tests_1/view_sync_task.rs index 5b2a9bf2d3..85827dbbdb 100644 --- a/crates/testing/tests/tests_1/view_sync_task.rs +++ b/crates/testing/tests/tests_1/view_sync_task.rs @@ -10,17 +10,16 @@ use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, view_sync::ViewSyncTaskState, }; use hotshot_testing::helpers::build_system_handle; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::ViewNumber, simple_vote::ViewSyncPreCommitData, traits::node_implementation::ConsensusTime, }; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_view_sync_task() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); // Build the 
API for node 5. let handle = build_system_handle::(5) @@ -51,7 +50,10 @@ async fn test_view_sync_task() { input.push(HotShotEvent::Shutdown); - output.push(HotShotEvent::ViewChange(ViewNumber::new(2))); + output.push(HotShotEvent::ViewChange( + ViewNumber::new(3), + EpochNumber::new(0), + )); output.push(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone())); let view_sync_state = diff --git a/crates/testing/tests/tests_1/vote_dependency_handle.rs b/crates/testing/tests/tests_1/vote_dependency_handle.rs index 085e37862a..9a58e4046c 100644 --- a/crates/testing/tests/tests_1/vote_dependency_handle.rs +++ b/crates/testing/tests/tests_1/vote_dependency_handle.rs @@ -1,34 +1,34 @@ use std::time::Duration; use async_broadcast::broadcast; -use async_compatibility_layer::art::async_timeout; use futures::StreamExt; -use hotshot_example_types::node_types::{MemoryImpl, TestTypes, TestVersions}; +use hotshot_example_types::{ + node_types::{MemoryImpl, TestTypes, TestVersions}, + state_types::TestValidatedState, +}; use hotshot_task::dependency_task::HandleDepOutput; use hotshot_task_impls::{events::HotShotEvent::*, quorum_vote::VoteDependencyHandle}; use hotshot_testing::{ - helpers::{build_fake_view_with_leaf, build_system_handle}, + helpers::build_system_handle, predicates::{event::*, Predicate, PredicateResult}, view_generator::TestViewGenerator, }; use hotshot_types::{ consensus::OuterConsensus, - data::{EpochNumber, ViewNumber}, + data::{EpochNumber, Leaf, ViewNumber}, traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, - vote::HasViewNumber, }; use itertools::Itertools; +use tokio::time::timeout; const TIMEOUT: Duration = Duration::from_millis(35); #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_vote_dependency_handle() { use std::sync::Arc; - 
async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); // We use a node ID of 2 here abitrarily. We just need it to build the system handle. let node_id = 2; @@ -54,14 +54,14 @@ async fn test_vote_dependency_handle() { dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); consensus_writer - .update_validated_state_map( - view.quorum_proposal.data.view_number(), - build_fake_view_with_leaf(view.leaf.clone(), &handle.hotshot.upgrade_lock).await, + .update_leaf( + Leaf::from_quorum_proposal(&view.quorum_proposal.data), + Arc::new(TestValidatedState::default()), + None, + &handle.hotshot.upgrade_lock, ) + .await .unwrap(); - consensus_writer - .update_saved_leaves(view.leaf.clone(), &handle.hotshot.upgrade_lock) - .await; } drop(consensus_writer); @@ -79,8 +79,7 @@ async fn test_vote_dependency_handle() { for inputs in all_inputs.into_iter() { // The outputs are static here, but we re-make them since we use `into_iter` below let outputs = vec![ - exact(QuorumVoteDependenciesValidated(ViewNumber::new(2))), - validated_state_updated(), + exact(ViewChange(ViewNumber::new(3), EpochNumber::new(0))), quorum_vote_send(), ]; @@ -96,11 +95,11 @@ async fn test_vote_dependency_handle() { quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), storage: Arc::clone(&handle.storage()), view_number, - epoch_number: EpochNumber::new(1), sender: event_sender.clone(), receiver: event_receiver.clone().deactivate(), upgrade_lock: handle.hotshot.upgrade_lock.clone(), id: handle.hotshot.id, + epoch_height: handle.hotshot.config.epoch_height, }; vote_dependency_handle_state @@ -110,9 +109,7 @@ async fn test_vote_dependency_handle() { // We need to avoid re-processing the inputs during our output evaluation. This part here is not // strictly necessary, but it makes writing the outputs easier. 
let mut output_events = vec![]; - while let Ok(Ok(received_output)) = - async_timeout(TIMEOUT, event_receiver.recv_direct()).await - { + while let Ok(Ok(received_output)) = timeout(TIMEOUT, event_receiver.recv_direct()).await { output_events.push(received_output); } diff --git a/crates/testing/tests/tests_2/catchup.rs b/crates/testing/tests/tests_2/catchup.rs index 3d4702a3ec..bf48a2558c 100644 --- a/crates/testing/tests/tests_2/catchup.rs +++ b/crates/testing/tests/tests_2/catchup.rs @@ -19,8 +19,7 @@ use hotshot_testing::{ }; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup() { use std::time::Duration; @@ -33,8 +32,8 @@ async fn test_catchup() { test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -79,8 +78,7 @@ async fn test_catchup() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup_cdn() { use std::time::Duration; @@ -93,8 +91,8 @@ async fn test_catchup_cdn() { test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -134,8 +132,7 @@ async fn test_catchup_cdn() { /// Test that one node catches up and has successful views after coming back #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = 
"async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup_one_node() { use std::time::Duration; @@ -147,8 +144,8 @@ async fn test_catchup_one_node() { spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -190,8 +187,7 @@ async fn test_catchup_one_node() { /// Same as `test_catchup` except we start the nodes after their leadership so they join during view sync #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup_in_view_sync() { use std::time::Duration; @@ -203,8 +199,8 @@ async fn test_catchup_in_view_sync() { spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -253,8 +249,7 @@ async fn test_catchup_in_view_sync() { // Almost the same as `test_catchup`, but with catchup nodes reloaded from anchor leaf rather than // initialized from genesis. 
#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_catchup_reload() { use std::time::Duration; @@ -267,8 +262,8 @@ async fn test_catchup_reload() { test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() diff --git a/crates/testing/tests/tests_3/memory_network.rs b/crates/testing/tests/tests_3/memory_network.rs index 3e56630313..875e5b40e9 100644 --- a/crates/testing/tests/tests_3/memory_network.rs +++ b/crates/testing/tests/tests_3/memory_network.rs @@ -7,7 +7,6 @@ #![allow(clippy::panic)] use std::{sync::Arc, time::Duration}; -use async_compatibility_layer::{art::async_timeout, logging::setup_logging}; use hotshot::{ traits::{ election::static_committee::StaticCommittee, @@ -34,6 +33,7 @@ use hotshot_types::{ }; use rand::{rngs::StdRng, RngCore, SeedableRng}; use serde::{Deserialize, Serialize}; +use tokio::time::timeout; use tracing::{instrument, trace}; #[derive( @@ -120,22 +120,22 @@ fn gen_messages(num_messages: u64, seed: u64, pk: BLSPubKey) -> Vec::SignatureKey>> = MasterMap::new(); trace!(?group); let _pub_key = pubkey(); } // // Spawning a two MemoryNetworks and connecting them should produce no errors -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn memory_network_spawn_double() { - setup_logging(); + hotshot::helpers::initialize_logging(); let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); let _pub_key_1 = pubkey(); @@ -143,11 +143,11 @@ async fn memory_network_spawn_double() { } // Check to 
make sure direct queue works -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn memory_network_direct_queue() { - setup_logging(); + hotshot::helpers::initialize_logging(); // Create some dummy messages // Make and connect the networking instances @@ -177,11 +177,9 @@ async fn memory_network_direct_queue() { .await .expect("Failed to receive message"); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!( - async_timeout(Duration::from_secs(1), network2.recv_message()) - .await - .is_err() - ); + assert!(timeout(Duration::from_secs(1), network2.recv_message()) + .await + .is_err()); fake_message_eq(sent_message, deserialized_message); } @@ -200,18 +198,16 @@ async fn memory_network_direct_queue() { .await .expect("Failed to receive message"); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!( - async_timeout(Duration::from_secs(1), network1.recv_message()) - .await - .is_err() - ); + assert!(timeout(Duration::from_secs(1), network1.recv_message()) + .await + .is_err()); fake_message_eq(sent_message, deserialized_message); } } // Check to make sure direct queue works -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn memory_network_broadcast_queue() { // Make and connect the networking instances @@ -238,11 +234,9 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to receive message"); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!( - async_timeout(Duration::from_secs(1), network2.recv_message()) - .await - .is_err() - ); + assert!(timeout(Duration::from_secs(1), network2.recv_message()) + .await + 
.is_err()); fake_message_eq(sent_message, deserialized_message); } @@ -265,21 +259,18 @@ async fn memory_network_broadcast_queue() { .await .expect("Failed to receive message"); let deserialized_message = upgrade_lock.deserialize(&recv_message).await.unwrap(); - assert!( - async_timeout(Duration::from_secs(1), network1.recv_message()) - .await - .is_err() - ); + assert!(timeout(Duration::from_secs(1), network1.recv_message()) + .await + .is_err()); fake_message_eq(sent_message, deserialized_message); } } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] #[allow(deprecated)] async fn memory_network_test_in_flight_message_count() { - setup_logging(); + hotshot::helpers::initialize_logging(); let group: Arc::SignatureKey>> = MasterMap::new(); trace!(?group); diff --git a/crates/testing/tests/tests_4/byzantine_tests.rs b/crates/testing/tests/tests_4/byzantine_tests.rs index 8b460a5665..7d89d6c870 100644 --- a/crates/testing/tests/tests_4/byzantine_tests.rs +++ b/crates/testing/tests/tests_4/byzantine_tests.rs @@ -1,61 +1,61 @@ -use std::{collections::HashMap, rc::Rc, time::Duration}; +// use std::{collections::HashMap, rc::Rc, time::Duration}; -use hotshot_example_types::{ - node_types::{Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl}, - state_types::TestTypes, -}; -use hotshot_macros::cross_tests; -use hotshot_testing::{ - block_builder::SimpleBuilderImplementation, - byzantine::byzantine_behaviour::ViewDelay, - completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, - test_builder::{Behaviour, TestDescription}, -}; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +// use hotshot_example_types::{ +// node_types::{Libp2pImpl, MarketplaceTestVersions, MemoryImpl, PushCdnImpl}, +// state_types::TestTypes, +// }; +// use hotshot_macros::cross_tests; 
+// use hotshot_testing::{ +// block_builder::SimpleBuilderImplementation, +// byzantine::byzantine_behaviour::ViewDelay, +// completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription}, +// test_builder::{Behaviour, TestDescription}, +// }; +// use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; -cross_tests!( - TestName: view_delay, - Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], - Types: [TestTypes], - Versions: [MarketplaceTestVersions], - Ignore: false, - Metadata: { +// cross_tests!( +// TestName: view_delay, +// Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], +// Types: [TestTypes], +// Versions: [MarketplaceTestVersions], +// Ignore: false, +// Metadata: { - let behaviour = Rc::new(|node_id| { - let view_delay = ViewDelay { - number_of_views_to_delay: node_id/3, - events_for_view: HashMap::new(), - stop_view_delay_at_view_number: 25, - }; - match node_id { - 6|10|14 => Behaviour::Byzantine(Box::new(view_delay)), - _ => Behaviour::Standard, - } - }); +// let behaviour = Rc::new(|node_id| { +// let view_delay = ViewDelay { +// number_of_views_to_delay: node_id/3, +// events_for_view: HashMap::new(), +// stop_view_delay_at_view_number: 25, +// }; +// match node_id { +// 6|10|14 => Behaviour::Byzantine(Box::new(view_delay)), +// _ => Behaviour::Standard, +// } +// }); - let mut metadata = TestDescription { - // allow more time to pass in CI - completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( - TimeBasedCompletionTaskDescription { - duration: Duration::from_secs(60), - }, - ), - behaviour, - ..TestDescription::default() - }; +// let mut metadata = TestDescription { +// // allow more time to pass in CI +// completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( +// TimeBasedCompletionTaskDescription { +// duration: Duration::from_secs(60), +// }, +// ), +// behaviour, +// ..TestDescription::default() +// }; - let num_nodes_with_stake = 15; - 
metadata.num_nodes_with_stake = num_nodes_with_stake; - metadata.da_staked_committee_size = num_nodes_with_stake; - metadata.overall_safety_properties.num_failed_views = 20; - metadata.overall_safety_properties.num_successful_views = 20; - metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ - (ViewNumber::new(6), false), - (ViewNumber::new(10), false), - (ViewNumber::new(14), false), - (ViewNumber::new(21), false), - (ViewNumber::new(25), false), - ]); - metadata - }, -); +// let num_nodes_with_stake = 15; +// metadata.num_nodes_with_stake = num_nodes_with_stake; +// metadata.da_staked_committee_size = num_nodes_with_stake; +// metadata.overall_safety_properties.num_failed_views = 20; +// metadata.overall_safety_properties.num_successful_views = 20; +// metadata.overall_safety_properties.expected_views_to_fail = HashMap::from([ +// (ViewNumber::new(6), false), +// (ViewNumber::new(10), false), +// (ViewNumber::new(14), false), +// (ViewNumber::new(21), false), +// (ViewNumber::new(25), false), +// ]); +// metadata +// }, +// ); diff --git a/crates/testing/tests/tests_4/test_with_failures_f.rs b/crates/testing/tests/tests_4/test_with_failures_f.rs index 6ea256fc2d..4fd033d0af 100644 --- a/crates/testing/tests/tests_4/test_with_failures_f.rs +++ b/crates/testing/tests/tests_4/test_with_failures_f.rs @@ -23,7 +23,7 @@ cross_tests!( Ignore: false, Metadata: { let mut metadata = TestDescription::default_more_nodes(); - metadata.overall_safety_properties.num_failed_views = 5; + metadata.overall_safety_properties.num_failed_views = 6; // Make sure we keep committing rounds after the bad leaders, but not the full 50 because of the numerous timeouts metadata.overall_safety_properties.num_successful_views = 20; metadata.num_bootstrap_nodes = 14; diff --git a/crates/testing/tests/tests_5/broken_3_chain.rs b/crates/testing/tests/tests_5/broken_3_chain.rs index 9d020cb144..e785e0ae42 100644 --- a/crates/testing/tests/tests_5/broken_3_chain.rs +++ 
b/crates/testing/tests/tests_5/broken_3_chain.rs @@ -12,12 +12,12 @@ use hotshot_testing::{ use tracing::instrument; /// Broken 3-chain test -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn broken_3_chain() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, diff --git a/crates/testing/tests/tests_5/combined_network.rs b/crates/testing/tests/tests_5/combined_network.rs index 81222cd088..67fb0e4030 100644 --- a/crates/testing/tests/tests_5/combined_network.rs +++ b/crates/testing/tests/tests_5/combined_network.rs @@ -19,14 +19,13 @@ use tracing::instrument; /// A run with both the CDN and libp2p functioning properly #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_combined_network() { use hotshot_testing::block_builder::SimpleBuilderImplementation; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -54,12 +53,12 @@ async fn test_combined_network() { } // A run where the CDN crashes part-way through -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_combined_network_cdn_crash() { - 
async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -100,12 +99,12 @@ async fn test_combined_network_cdn_crash() { // A run where the CDN crashes partway through // and then comes back up -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_combined_network_reup() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -151,12 +150,12 @@ async fn test_combined_network_reup() { } // A run where half of the nodes disconnect from the CDN -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_combined_network_half_dc() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -224,13 +223,13 @@ fn generate_random_node_changes( } // A fuzz test, where random network events take place on all nodes -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] #[ignore] async fn test_stress_combined_network_fuzzy() { - async_compatibility_layer::logging::setup_logging(); - 
async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let mut metadata: TestDescription = TestDescription { num_bootstrap_nodes: 10, num_nodes_with_stake: 20, diff --git a/crates/testing/tests/tests_5/fake_solver.rs b/crates/testing/tests/tests_5/fake_solver.rs index 27f1c91bb8..cfdd0a3b3b 100644 --- a/crates/testing/tests/tests_5/fake_solver.rs +++ b/crates/testing/tests/tests_5/fake_solver.rs @@ -1,4 +1,3 @@ -use async_compatibility_layer::art::async_spawn; use hotshot_example_types::{ auction_results_provider_types::TestAuctionResult, node_types::TestTypes, }; @@ -9,12 +8,12 @@ use tracing::instrument; use url::Url; #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_fake_solver_fetch_non_permissioned_no_error() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + use tokio::spawn; + + hotshot::helpers::initialize_logging(); let solver_state = FakeSolverState::new( None, /* 0% error rate */ @@ -33,7 +32,7 @@ async fn test_fake_solver_fetch_non_permissioned_no_error() { .parse() .unwrap(); let url = solver_url.clone(); - let solver_handle = async_spawn(async move { + let solver_handle = spawn(async move { solver_state.run::(url).await.unwrap(); }); @@ -49,9 +48,6 @@ async fn test_fake_solver_fetch_non_permissioned_no_error() { .await .unwrap(); - #[cfg(async_executor_impl = "async-std")] - solver_handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] solver_handle.abort(); assert_eq!(resp.urls[0], Url::parse("http://localhost:1111/").unwrap()); @@ -60,12 +56,12 @@ async fn test_fake_solver_fetch_non_permissioned_no_error() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", 
async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_fake_solver_fetch_non_permissioned_with_errors() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + use tokio::spawn; + + hotshot::helpers::initialize_logging(); let solver_state = FakeSolverState::new(Some(0.5), vec!["http://localhost:1111".parse().unwrap()]); @@ -78,7 +74,7 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { .parse() .unwrap(); let url = solver_url.clone(); - let solver_handle = async_spawn(async move { + let solver_handle = spawn(async move { solver_state.run::(url).await.unwrap(); }); @@ -132,9 +128,6 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { } } - #[cfg(async_executor_impl = "async-std")] - solver_handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] solver_handle.abort(); // Assert over the payloads with a 50% error rate. @@ -147,12 +140,12 @@ async fn test_fake_solver_fetch_non_permissioned_with_errors() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_fake_solver_fetch_permissioned_no_error() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + use tokio::spawn; + + hotshot::helpers::initialize_logging(); let solver_state = FakeSolverState::new( None, /* 0% error rate */ @@ -174,7 +167,7 @@ async fn test_fake_solver_fetch_permissioned_no_error() { .parse() .unwrap(); let url = solver_url.clone(); - let solver_handle = async_spawn(async move { + let solver_handle = spawn(async move { solver_state.run::(url).await.unwrap(); }); @@ -198,9 +191,6 @@ async fn test_fake_solver_fetch_permissioned_no_error() { .await .unwrap(); - #[cfg(async_executor_impl = "async-std")] - solver_handle.cancel().await; - 
#[cfg(async_executor_impl = "tokio")] solver_handle.abort(); assert_eq!(resp.urls[0], Url::parse("http://localhost:1111/").unwrap()); @@ -209,12 +199,12 @@ async fn test_fake_solver_fetch_permissioned_no_error() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn test_fake_solver_fetch_permissioned_with_errors() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + use tokio::spawn; + + hotshot::helpers::initialize_logging(); let solver_state = FakeSolverState::new(Some(0.5), vec!["http://localhost:1111".parse().unwrap()]); @@ -230,7 +220,7 @@ async fn test_fake_solver_fetch_permissioned_with_errors() { .parse() .unwrap(); let url = solver_url.clone(); - let solver_handle = async_spawn(async move { + let solver_handle = spawn(async move { solver_state.run::(url).await.unwrap(); }); @@ -288,9 +278,6 @@ async fn test_fake_solver_fetch_permissioned_with_errors() { } } - #[cfg(async_executor_impl = "async-std")] - solver_handle.cancel().await; - #[cfg(async_executor_impl = "tokio")] solver_handle.abort(); // Assert over the payloads with a 50% error rate. 
diff --git a/crates/testing/tests/tests_5/push_cdn.rs b/crates/testing/tests/tests_5/push_cdn.rs index 97e3dc0cb2..b90a038d99 100644 --- a/crates/testing/tests/tests_5/push_cdn.rs +++ b/crates/testing/tests/tests_5/push_cdn.rs @@ -6,7 +6,6 @@ use std::time::Duration; -use async_compatibility_layer::logging::shutdown_logging; use hotshot_example_types::node_types::{PushCdnImpl, TestTypes, TestVersions}; use hotshot_testing::{ block_builder::SimpleBuilderImplementation, @@ -17,12 +16,12 @@ use hotshot_testing::{ use tracing::instrument; /// Push CDN network test -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] + +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn push_cdn_network() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { timing_data: TimingData { next_view_timeout: 10_000, @@ -45,5 +44,4 @@ async fn push_cdn_network() { .launch() .run_test::() .await; - shutdown_logging(); } diff --git a/crates/testing/tests/tests_5/timeout.rs b/crates/testing/tests/tests_5/timeout.rs index 9809ea7eb5..4466a4e2f6 100644 --- a/crates/testing/tests/tests_5/timeout.rs +++ b/crates/testing/tests/tests_5/timeout.rs @@ -5,8 +5,7 @@ // along with the HotShot repository. If not, see . 
#[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_timeout() { use std::time::Duration; @@ -18,8 +17,8 @@ async fn test_timeout() { spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription}, test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() @@ -62,8 +61,7 @@ async fn test_timeout() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[ignore] async fn test_timeout_libp2p() { use std::time::Duration; @@ -77,8 +75,8 @@ async fn test_timeout_libp2p() { test_builder::{TestDescription, TimingData}, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let timing_data = TimingData { next_view_timeout: 2000, ..Default::default() diff --git a/crates/testing/tests/tests_5/unreliable_network.rs b/crates/testing/tests/tests_5/unreliable_network.rs index 0e4f611a42..8bc6e2d885 100644 --- a/crates/testing/tests/tests_5/unreliable_network.rs +++ b/crates/testing/tests/tests_5/unreliable_network.rs @@ -18,12 +18,11 @@ use hotshot_types::traits::network::{ }; use tracing::instrument; -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn libp2p_network_sync() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + 
hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -49,8 +48,7 @@ async fn libp2p_network_sync() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_memory_network_sync() { use std::time::Duration; @@ -60,8 +58,8 @@ async fn test_memory_network_sync() { test_builder::TestDescription, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( @@ -82,13 +80,12 @@ async fn test_memory_network_sync() { .await; } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[ignore] #[instrument] async fn libp2p_network_async() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -122,9 +119,8 @@ async fn libp2p_network_async() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[ignore] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_memory_network_async() { use std::time::Duration; @@ -134,8 +130,8 @@ async fn test_memory_network_async() { test_builder::TestDescription, }; - async_compatibility_layer::logging::setup_logging(); - 
async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, @@ -169,8 +165,7 @@ async fn test_memory_network_async() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_memory_network_partially_sync() { use std::time::Duration; @@ -180,8 +175,8 @@ async fn test_memory_network_partially_sync() { test_builder::TestDescription, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, @@ -220,12 +215,11 @@ async fn test_memory_network_partially_sync() { .await; } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[instrument] async fn libp2p_network_partially_sync() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { num_failed_views: 0, @@ -261,9 +255,8 @@ async fn libp2p_network_partially_sync() { } #[cfg(test)] -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] #[ignore] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] async fn test_memory_network_chaos() { use std::time::Duration; @@ -273,8 +266,8 @@ async fn test_memory_network_chaos() { 
test_builder::TestDescription, }; - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { // allow more time to pass in CI completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( @@ -299,13 +292,12 @@ async fn test_memory_network_chaos() { .await; } -#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))] -#[cfg_attr(async_executor_impl = "async-std", async_std::test)] +#[tokio::test(flavor = "multi_thread")] #[ignore] #[instrument] async fn libp2p_network_chaos() { - async_compatibility_layer::logging::setup_logging(); - async_compatibility_layer::logging::setup_backtrace(); + hotshot::helpers::initialize_logging(); + let metadata: TestDescription = TestDescription { overall_safety_properties: OverallSafetyPropertiesDescription { check_leaf: true, diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index e9ad6368b5..0184255278 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -13,7 +13,6 @@ ark-ff = { workspace = true } ark-serialize = { workspace = true } ark-srs = { version = "0.3.1" } ark-std = { workspace = true } -async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } @@ -40,7 +39,7 @@ typenum = { workspace = true } derivative = "2" jf-vid = { workspace = true } jf-pcs = { workspace = true } -jf-signature = { workspace = true, features = ["schnorr"] } +jf-signature = { workspace = true, features = ["bls", "schnorr"] } jf-utils = { workspace = true } rand_chacha = { workspace = true } serde = { workspace = true } @@ -48,7 +47,7 @@ serde_bytes = { workspace = true } tagged-base64 = { workspace = true } vbs = { workspace = true } displaydoc = { version = "0.2.5", default-features = false } -dyn-clone = { git = 
"https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } +dyn-clone = "1.0.17" url = { workspace = true } utils = { path = "../utils" } vec1 = { workspace = true } @@ -57,16 +56,12 @@ serde_json = { workspace = true } surf-disco = { workspace = true } toml = { workspace = true } clap = { workspace = true } +tokio = { workspace = true } [features] gpu-vid = ["jf-vid/gpu-vid"] test-srs = ["jf-vid/test-srs"] -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace = true } [lints] workspace = true diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index ec872af603..eb8fe4ade2 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -14,7 +14,7 @@ use std::{ }; use async_lock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; -use committable::Commitment; +use committable::{Commitment, Committable}; use tracing::instrument; use utils::anytrace::*; use vec1::Vec1; @@ -33,7 +33,9 @@ use crate::{ signature_key::SignatureKey, BlockPayload, ValidatedState, }, - utils::{BuilderCommitment, StateAndDelta, Terminator}, + utils::{ + epoch_from_block_number, BuilderCommitment, LeafCommitment, StateAndDelta, Terminator, + }, vid::VidCommitment, vote::HasViewNumber, }; @@ -317,6 +319,9 @@ pub struct Consensus { /// A reference to the metrics trait pub metrics: Arc, + + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } /// Contains several `ConsensusMetrics` that we're interested in from the consensus interfaces @@ -405,6 +410,7 @@ impl Consensus { saved_payloads: BTreeMap>, high_qc: QuorumCertificate, metrics: Arc, + epoch_height: u64, ) -> Self { Consensus { validated_state_map, @@ -420,6 +426,7 @@ impl Consensus { saved_payloads, high_qc, metrics, + epoch_height, } } @@ -484,7 +491,7 @@ impl Consensus { pub fn update_view(&mut self, 
view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.cur_view, - "New view isn't newer than the current view." + debug!("New view isn't newer than the current view.") ); self.cur_view = view_number; Ok(()) @@ -496,8 +503,9 @@ impl Consensus { pub fn update_epoch(&mut self, epoch_number: TYPES::Epoch) -> Result<()> { ensure!( epoch_number > self.cur_epoch, - "New epoch isn't newer than the current epoch." + debug!("New epoch isn't newer than the current epoch.") ); + tracing::trace!("Updating epoch from {} to {}", self.cur_epoch, epoch_number); self.cur_epoch = epoch_number; Ok(()) } @@ -548,7 +556,7 @@ impl Consensus { .last_proposals .last_key_value() .map_or(TYPES::View::genesis(), |(k, _)| { *k }), - "New view isn't newer than the previously proposed view." + debug!("New view isn't newer than the previously proposed view.") ); self.last_proposals .insert(proposal.data.view_number(), proposal); @@ -562,7 +570,7 @@ impl Consensus { pub fn update_last_decided_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.last_decided_view, - "New view isn't newer than the previously decided view." + debug!("New view isn't newer than the previously decided view.") ); self.last_decided_view = view_number; Ok(()) @@ -575,7 +583,7 @@ impl Consensus { pub fn update_locked_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.locked_view, - "New view isn't newer than the previously locked view." + debug!("New view isn't newer than the previously locked view.") ); self.locked_view = view_number; Ok(()) @@ -586,7 +594,48 @@ impl Consensus { /// # Errors /// Can return an error when the new view contains less information than the exisiting view /// with the same view number. 
- pub fn update_validated_state_map( + pub fn update_da_view( + &mut self, + view_number: TYPES::View, + payload_commitment: VidCommitment, + ) -> Result<()> { + let view = View { + view_inner: ViewInner::Da { payload_commitment }, + }; + self.update_validated_state_map(view_number, view) + } + + /// Update the validated state map with a new view_number/view combo. + /// + /// # Errors + /// Can return an error when the new view contains less information than the exisiting view + /// with the same view number. + pub async fn update_leaf( + &mut self, + leaf: Leaf, + state: Arc, + delta: Option>::Delta>>, + upgrade_lock: &UpgradeLock, + ) -> Result<()> { + let view_number = leaf.view_number(); + let view = View { + view_inner: ViewInner::Leaf { + leaf: leaf.commit(upgrade_lock).await, + state, + delta, + }, + }; + self.update_validated_state_map(view_number, view)?; + self.update_saved_leaves(leaf, upgrade_lock).await; + Ok(()) + } + + /// Update the validated state map with a new view_number/view combo. + /// + /// # Errors + /// Can return an error when the new view contains less information than the exisiting view + /// with the same view number. + fn update_validated_state_map( &mut self, view_number: TYPES::View, new_view: View, @@ -604,7 +653,7 @@ impl Consensus { { ensure!( new_delta.is_some() || existing_delta.is_none(), - "Skipping the state update to not override a `Leaf` view with `Some` state delta." + debug!("Skipping the state update to not override a `Leaf` view with `Some` state delta.") ); } else { bail!("Skipping the state update to not override a `Leaf` view with a non-`Leaf` view."); @@ -616,7 +665,7 @@ impl Consensus { } /// Update the saved leaves with a new leaf. 
- pub async fn update_saved_leaves( + async fn update_saved_leaves( &mut self, leaf: Leaf, upgrade_lock: &UpgradeLock, @@ -648,7 +697,7 @@ impl Consensus { pub fn update_high_qc(&mut self, high_qc: QuorumCertificate) -> Result<()> { ensure!( high_qc.view_number > self.high_qc.view_number || high_qc == self.high_qc, - "High QC with an equal or higher view exists." + debug!("High QC with an equal or higher view exists.") ); tracing::debug!("Updating high QC"); self.high_qc = high_qc; @@ -835,6 +884,103 @@ impl Consensus { } Some(()) } + + /// Return true if the high QC takes part in forming an eQC, i.e. + /// it is one of the 3-chain certificates but not the eQC itself + pub fn is_high_qc_forming_eqc(&self) -> bool { + let high_qc_leaf_commit = self.high_qc().data.leaf_commit; + let is_high_qc_extended = self.is_leaf_extended(high_qc_leaf_commit); + if is_high_qc_extended { + tracing::debug!("We have formed an eQC!"); + } + self.is_leaf_for_last_block(high_qc_leaf_commit) && !is_high_qc_extended + } + + /// Return true if the given leaf takes part in forming an eQC, i.e. + /// it is one of the 3-chain leaves but not the eQC leaf itself + pub fn is_leaf_forming_eqc(&self, leaf_commit: LeafCommitment) -> bool { + self.is_leaf_for_last_block(leaf_commit) && !self.is_leaf_extended(leaf_commit) + } + + /// Returns true if the given leaf can form an extended Quorum Certificate + /// The Extended Quorum Certificate (eQC) is the third Quorum Certificate formed in three + /// consecutive views for the last block in the epoch. 
+ pub fn is_leaf_extended(&self, leaf_commit: LeafCommitment) -> bool { + if !self.is_leaf_for_last_block(leaf_commit) { + tracing::trace!("The given leaf is not for the last block in the epoch."); + return false; + } + + let Some(leaf) = self.saved_leaves.get(&leaf_commit) else { + tracing::trace!("We don't have a leaf corresponding to the leaf commit"); + return false; + }; + let leaf_view = leaf.view_number(); + let leaf_block_number = leaf.height(); + + let mut last_visited_view_number = leaf_view; + let mut is_leaf_extended = true; + if let Err(e) = self.visit_leaf_ancestors( + leaf_view, + Terminator::Inclusive(leaf_view - 2), + true, + |leaf, _, _| { + tracing::trace!( + "last_visited_view_number = {}, leaf.view_number = {}", + *last_visited_view_number, + *leaf.view_number() + ); + + if leaf.view_number() == leaf_view { + return true; + } + + if last_visited_view_number - 1 != leaf.view_number() { + tracing::trace!("The chain is broken. Non consecutive views."); + is_leaf_extended = false; + return false; + } + if leaf_block_number != leaf.height() { + tracing::trace!("The chain is broken. Block numbers do not match."); + is_leaf_extended = false; + return false; + } + last_visited_view_number = leaf.view_number(); + true + }, + ) { + is_leaf_extended = false; + tracing::trace!("The chain is broken. Leaf ascension failed."); + tracing::debug!("Leaf ascension failed; error={e}"); + } + tracing::trace!("Can the given leaf form an eQC? 
{}", is_leaf_extended); + is_leaf_extended + } + + /// Returns true if a given leaf is for the last block in the epoch + pub fn is_leaf_for_last_block(&self, leaf_commit: LeafCommitment) -> bool { + let Some(leaf) = self.saved_leaves.get(&leaf_commit) else { + tracing::trace!("We don't have a leaf corresponding to the leaf commit"); + return false; + }; + let block_height = leaf.height(); + if block_height == 0 || self.epoch_height == 0 { + false + } else { + block_height % self.epoch_height == 0 + } + } + + /// Returns true if the `parent_leaf` formed an eQC for the previous epoch to the `proposed_leaf` + pub fn check_eqc(&self, proposed_leaf: &Leaf, parent_leaf: &Leaf) -> bool { + if parent_leaf.view_number() == TYPES::View::genesis() { + return true; + } + let new_epoch = epoch_from_block_number(proposed_leaf.height(), self.epoch_height); + let old_epoch = epoch_from_block_number(parent_leaf.height(), self.epoch_height); + let parent_leaf_commit = as Committable>::commit(parent_leaf); + new_epoch - 1 == old_epoch && self.is_leaf_extended(parent_leaf_commit) + } } /// Alias for the block payload commitment and the associated metadata. 
The primary data diff --git a/crates/types/src/constants.rs b/crates/types/src/constants.rs index b4c8a39008..6a1969ae31 100644 --- a/crates/types/src/constants.rs +++ b/crates/types/src/constants.rs @@ -21,7 +21,7 @@ pub const LOOK_AHEAD: u64 = 5; pub const KAD_DEFAULT_REPUB_INTERVAL_SEC: u64 = 28800; /// the number of messages to cache in the combined network -pub const COMBINED_NETWORK_CACHE_SIZE: usize = 1000; +pub const COMBINED_NETWORK_CACHE_SIZE: usize = 200_000; /// the number of messages to attempt to send over the primary network before switching to prefer the secondary network pub const COMBINED_NETWORK_MIN_PRIMARY_FAILURES: u64 = 5; diff --git a/crates/types/src/data.rs b/crates/types/src/data.rs index 38092e0969..fc5506ff25 100644 --- a/crates/types/src/data.rs +++ b/crates/types/src/data.rs @@ -18,8 +18,6 @@ use std::{ }; use async_lock::RwLock; -#[cfg(async_executor_impl = "async-std")] -use async_std::task::spawn_blocking; use bincode::Options; use committable::{Commitment, CommitmentBoundsArkless, Committable, RawCommitmentBuilder}; use derivative::Derivative; @@ -27,7 +25,6 @@ use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidSche use rand::Rng; use serde::{Deserialize, Serialize}; use thiserror::Error; -#[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::error; use utils::anytrace::*; @@ -226,8 +223,6 @@ impl VidDisperse { ) .unwrap_or_else(|err| panic!("VID disperse failure:(num_storage nodes,payload_byte_len)=({num_nodes},{}) error: {err}", txns.len())) }).await; - #[cfg(async_executor_impl = "tokio")] - // Tokio's JoinHandle's `Output` is `Result`, while in async-std it's just `T` // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. 
let vid_disperse = vid_disperse.unwrap(); @@ -801,7 +796,7 @@ pub mod null_block { use crate::{ traits::{ block_contents::BuilderFee, - node_implementation::{ConsensusTime, NodeType, Versions}, + node_implementation::{NodeType, Versions}, signature_key::BuilderSignatureKey, BlockPayload, }, @@ -830,6 +825,7 @@ pub mod null_block { pub fn builder_fee( num_storage_nodes: usize, version: vbs::version::Version, + view_number: u64, ) -> Option> { /// Arbitrary fee amount, this block doesn't actually come from a builder const FEE_AMOUNT: u64 = 0; @@ -843,7 +839,7 @@ pub mod null_block { match TYPES::BuilderSignatureKey::sign_sequencing_fee_marketplace( &priv_key, FEE_AMOUNT, - *TYPES::View::genesis(), + view_number, ) { Ok(sig) => Some(BuilderFee { fee_amount: FEE_AMOUNT, diff --git a/crates/types/src/error.rs b/crates/types/src/error.rs index 80c1baae8d..f6c25e376f 100644 --- a/crates/types/src/error.rs +++ b/crates/types/src/error.rs @@ -14,8 +14,6 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use crate::{data::Leaf, traits::node_implementation::NodeType}; -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! 
{"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} /// Error type for `HotShot` #[derive(Debug, Error)] diff --git a/crates/types/src/hotshot_config_file.rs b/crates/types/src/hotshot_config_file.rs index 93f37ca022..9a285ae6fc 100644 --- a/crates/types/src/hotshot_config_file.rs +++ b/crates/types/src/hotshot_config_file.rs @@ -29,9 +29,6 @@ pub struct HotShotConfigFile { /// Total number of staked nodes in the network pub num_nodes_with_stake: NonZeroUsize, #[serde(skip)] - /// My own public key, secret key, stake value - pub my_own_validator_config: ValidatorConfig, - #[serde(skip)] /// The known nodes' public key and stake value pub known_nodes_with_stake: Vec>, #[serde(skip)] @@ -67,7 +64,6 @@ impl From> for HotShotConfig { num_nodes_with_stake: val.num_nodes_with_stake, known_da_nodes: val.known_da_nodes, known_nodes_with_stake: val.known_nodes_with_stake, - my_own_validator_config: val.my_own_validator_config, da_staked_committee_size: val.staked_da_nodes, fixed_leader_for_gpuvid: val.fixed_leader_for_gpuvid, next_view_timeout: val.next_view_timeout, @@ -120,7 +116,6 @@ impl HotShotConfigFile { Self { num_nodes_with_stake: NonZeroUsize::new(10).unwrap(), start_threshold: (1, 1), - my_own_validator_config: ValidatorConfig::default(), known_nodes_with_stake: gen_known_nodes_with_stake, staked_da_nodes, known_da_nodes, diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 44c81e4e68..f158fcf3ae 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -8,7 +8,6 @@ use std::{fmt::Debug, future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; use bincode::Options; -use derivative::Derivative; use displaydoc::Display; use light_client::StateVerKey; use tracing::error; @@ -64,15 +63,12 @@ where assert_future::(Box::pin(fut)) } -#[derive(serde::Serialize, serde::Deserialize, Clone, Derivative, Display)] -#[serde(bound(deserialize = ""))] -#[derivative(Debug(bound = ""))] +#[derive(Clone, Debug, 
Display)] /// config for validator, including public key, private key, stake value pub struct ValidatorConfig { /// The validator's public key and stake value pub public_key: KEY, /// The validator's private key, should be in the mempool, not public - #[derivative(Debug = "ignore")] pub private_key: KEY::PrivateKey, /// The validator's stake pub stake_value: u64, @@ -176,8 +172,6 @@ pub struct HotShotConfig { pub known_nodes_with_stake: Vec>, /// All public keys known to be DA nodes pub known_da_nodes: Vec>, - /// My own validator config, including my public key, private key, stake value, serving as private parameter - pub my_own_validator_config: ValidatorConfig, /// List of DA committee (staking)nodes for static DA committee pub da_staked_committee_size: usize, /// Number of fixed leaders for GPU VID, normally it will be 0, it's only used when running GPU VID diff --git a/crates/types/src/light_client.rs b/crates/types/src/light_client.rs index 1fac6614e4..07644df0ef 100644 --- a/crates/types/src/light_client.rs +++ b/crates/types/src/light_client.rs @@ -36,7 +36,7 @@ pub type StateSignKey = schnorr::SignKey; /// Concrete for circuit's public input pub type PublicInput = GenericPublicInput; /// Key pairs for signing/verifying a light client state -#[derive(Debug, Default, Clone, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Default, Clone)] pub struct StateKeyPair(pub schnorr::KeyPair); /// Request body to send to the state relay server diff --git a/crates/types/src/signature_key.rs b/crates/types/src/signature_key.rs index 2741912f14..43110ed8b2 100644 --- a/crates/types/src/signature_key.rs +++ b/crates/types/src/signature_key.rs @@ -23,7 +23,7 @@ use crate::{ stake_table::StakeTableEntry, traits::{ qc::QuorumCertificateScheme, - signature_key::{BuilderSignatureKey, SignatureKey}, + signature_key::{BuilderSignatureKey, PrivateSignatureKey, SignatureKey}, }, }; @@ -34,6 +34,20 @@ pub type BLSPubKey = VerKey; /// Public parameters for BLS signature 
scheme pub type BLSPublicParam = (); +impl PrivateSignatureKey for BLSPrivKey { + fn to_bytes(&self) -> Vec { + self.to_bytes() + } + + fn from_bytes(bytes: &[u8]) -> anyhow::Result { + Ok(Self::from_bytes(bytes)) + } + + fn to_tagged_base64(&self) -> Result { + self.to_tagged_base64() + } +} + impl SignatureKey for BLSPubKey { type PrivateKey = BLSPrivKey; type StakeTableEntry = StakeTableEntry; diff --git a/crates/types/src/traits/block_contents.rs b/crates/types/src/traits/block_contents.rs index de9968488c..5901b7242d 100644 --- a/crates/types/src/traits/block_contents.rs +++ b/crates/types/src/traits/block_contents.rs @@ -220,6 +220,7 @@ pub trait BlockHeader: builder_commitment: BuilderCommitment, metadata: >::Metadata, builder_fee: Vec>, + view_number: u64, vid_common: VidCommon, auction_results: Option, version: Version, diff --git a/crates/types/src/traits/network.rs b/crates/types/src/traits/network.rs index 07c1edd807..5a01560832 100644 --- a/crates/types/src/traits/network.rs +++ b/crates/types/src/traits/network.rs @@ -8,14 +8,6 @@ //! //! Contains types and traits used by `HotShot` to abstract over network access -use async_compatibility_layer::art::async_sleep; -use derivative::Derivative; -use dyn_clone::DynClone; -use futures::Future; -use thiserror::Error; - -#[cfg(not(any(async_executor_impl = "async-std", async_executor_impl = "tokio")))] -compile_error! 
{"Either config option \"async-std\" or \"tokio\" must be enabled for this crate."} use std::{ collections::HashMap, fmt::{Debug, Display}, @@ -25,14 +17,17 @@ use std::{ time::Duration, }; -use async_compatibility_layer::channel::TrySendError; use async_trait::async_trait; -use futures::future::join_all; +use derivative::Derivative; +use dyn_clone::DynClone; +use futures::{future::join_all, Future}; use rand::{ distributions::{Bernoulli, Uniform}, prelude::Distribution, }; use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tokio::{sync::mpsc::error::TrySendError, time::sleep}; use super::{node_implementation::NodeType, signature_key::SignatureKey}; use crate::{ @@ -376,7 +371,7 @@ pub trait NetworkReliability: Debug + Sync + std::marker::Send + DynClone + 'sta } let closure = async move { if sample_keep { - async_sleep(delay).await; + sleep(delay).await; for msg in msgs { send_fn(msg).await; } diff --git a/crates/types/src/traits/node_implementation.rs b/crates/types/src/traits/node_implementation.rs index 43035b01b7..d05652b783 100644 --- a/crates/types/src/traits/node_implementation.rs +++ b/crates/types/src/traits/node_implementation.rs @@ -267,4 +267,7 @@ pub trait Versions: Clone + Copy + Debug + Send + Sync + 'static { /// The version at which to switch over to marketplace logic type Marketplace: StaticVersionType; + + /// The version at which to switch over to epochs logic + type Epochs: StaticVersionType; } diff --git a/crates/types/src/traits/signature_key.rs b/crates/types/src/traits/signature_key.rs index 52503c0cd5..93edb4e1ba 100644 --- a/crates/types/src/traits/signature_key.rs +++ b/crates/types/src/traits/signature_key.rs @@ -20,7 +20,7 @@ use committable::Committable; use ethereum_types::U256; use jf_vid::VidScheme; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use tagged_base64::TaggedBase64; +use tagged_base64::{TaggedBase64, Tb64Error}; use super::EncodeBytes; use crate::{ @@ -36,6 +36,24 @@ pub trait 
StakeTableEntryType { fn public_key(&self) -> K; } +/// Trait for abstracting private signature key +pub trait PrivateSignatureKey: + Send + Sync + Sized + Clone + Debug + Eq + Hash + for<'a> TryFrom<&'a TaggedBase64> +{ + /// Serialize the private key into bytes + fn to_bytes(&self) -> Vec; + + /// Deserialize the private key from bytes + /// # Errors + /// If deserialization fails. + fn from_bytes(bytes: &[u8]) -> anyhow::Result; + + /// Serialize the private key into TaggedBase64 blob. + /// # Errors + /// If serialization fails. + fn to_tagged_base64(&self) -> Result; +} + /// Trait for abstracting public key signatures /// Self is the public key type pub trait SignatureKey: @@ -56,15 +74,7 @@ pub trait SignatureKey: + Into { /// The private key type for this signature algorithm - type PrivateKey: Send - + Sync - + Sized - + Clone - + Debug - + Eq - + Serialize - + for<'a> Deserialize<'a> - + Hash; + type PrivateKey: PrivateSignatureKey; /// The type of the entry that contain both public key and stake value type StakeTableEntry: StakeTableEntryType + Send @@ -179,15 +189,7 @@ pub trait BuilderSignatureKey: + Display { /// The type of the keys builder would use to sign its messages - type BuilderPrivateKey: Send - + Sync - + Sized - + Clone - + Debug - + Eq - + Serialize - + for<'a> Deserialize<'a> - + Hash; + type BuilderPrivateKey: PrivateSignatureKey; /// The type of the signature builder would use to sign its messages type BuilderSignature: Send diff --git a/crates/types/src/traits/states.rs b/crates/types/src/traits/states.rs index f152ef99cb..956fd22d02 100644 --- a/crates/types/src/traits/states.rs +++ b/crates/types/src/traits/states.rs @@ -42,7 +42,8 @@ pub trait StateDelta: /// BlockPayload`)) /// * The ability to validate that a block header is actually a valid extension of this state and /// produce a new state, with the modifications from the block applied -/// ([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header`)) +/// +/// 
([`validate_and_apply_header`](`ValidatedState::validate_and_apply_header)) pub trait ValidatedState: Serialize + DeserializeOwned + Debug + Default + PartialEq + Eq + Send + Sync + Clone { @@ -72,6 +73,7 @@ pub trait ValidatedState: proposed_header: &TYPES::BlockHeader, vid_common: VidCommon, version: Version, + view_number: u64, ) -> impl Future> + Send; /// Construct the state with the given block header. diff --git a/crates/types/src/traits/storage.rs b/crates/types/src/traits/storage.rs index d400ce455b..81ba2f66f8 100644 --- a/crates/types/src/traits/storage.rs +++ b/crates/types/src/traits/storage.rs @@ -13,6 +13,7 @@ use std::collections::BTreeMap; use anyhow::Result; use async_trait::async_trait; +use jf_vid::VidScheme; use super::node_implementation::NodeType; use crate::{ @@ -21,6 +22,7 @@ use crate::{ event::HotShotAction, message::Proposal, simple_certificate::{QuorumCertificate, UpgradeCertificate}, + vid::VidSchemeType, }; /// Abstraction for storing a variety of consensus payload datum. @@ -29,7 +31,11 @@ pub trait Storage: Send + Sync + Clone { /// Add a proposal to the stored VID proposals. async fn append_vid(&self, proposal: &Proposal>) -> Result<()>; /// Add a proposal to the stored DA proposals. - async fn append_da(&self, proposal: &Proposal>) -> Result<()>; + async fn append_da( + &self, + proposal: &Proposal>, + vid_commit: ::Commit, + ) -> Result<()>; /// Add a proposal we sent to the store async fn append_proposal( &self, diff --git a/crates/types/src/utils.rs b/crates/types/src/utils.rs index e3d19a8286..c3f8780575 100644 --- a/crates/types/src/utils.rs +++ b/crates/types/src/utils.rs @@ -70,7 +70,7 @@ impl Clone for ViewInner { } } /// The hash of a leaf. -type LeafCommitment = Commitment>; +pub type LeafCommitment = Commitment>; /// Optional validated state and state delta. 
pub type StateAndDelta = ( @@ -210,3 +210,15 @@ pub fn bincode_opts() -> WithOtherTrailing< .with_fixint_encoding() .reject_trailing_bytes() } + +/// Returns an epoch number given a block number and an epoch height +#[must_use] +pub fn epoch_from_block_number(block_number: u64, epoch_height: u64) -> u64 { + if epoch_height == 0 { + 0 + } else if block_number % epoch_height == 0 { + block_number / epoch_height + } else { + block_number / epoch_height + 1 + } +} diff --git a/crates/types/src/vid.rs b/crates/types/src/vid.rs index 1bc5dd45b7..b36eaf2020 100644 --- a/crates/types/src/vid.rs +++ b/crates/types/src/vid.rs @@ -321,8 +321,9 @@ impl Precomputable for VidSchemeType { /// /// Foreign type rules prevent us from doing: /// - `impl From> for VidDisperse` -/// - `impl VidDisperse {...}` and similarly for `Statement`. +/// - `impl VidDisperse {...}` /// +/// and similarly for `Statement`. /// Thus, we accomplish type conversion via functions. fn vid_disperse_conversion(vid_disperse: VidDisperse) -> VidDisperse { VidDisperse { diff --git a/crates/utils/src/anytrace.rs b/crates/utils/src/anytrace.rs index 62628207d1..05d6c1d9ea 100644 --- a/crates/utils/src/anytrace.rs +++ b/crates/utils/src/anytrace.rs @@ -6,7 +6,7 @@ mod macros; pub use macros::*; /// Default log level for the crate -pub const DEFAULT_LOG_LEVEL: Level = Level::Info; +pub const DEFAULT_LOG_LEVEL: Level = Level::Debug; /// Trait for logging errors pub trait Log { diff --git a/crates/utils/src/anytrace/macros.rs b/crates/utils/src/anytrace/macros.rs index 71036d21fb..29c5178b07 100644 --- a/crates/utils/src/anytrace/macros.rs +++ b/crates/utils/src/anytrace/macros.rs @@ -1,8 +1,10 @@ #[macro_export] /// Print the file and line number of the location this macro is invoked +/// +/// Note: temporarily prints only a null string to reduce verbosity of logging macro_rules! 
line_info { () => { - format!("{}:{}", file!(), line!()) + format!("") }; } pub use line_info; diff --git a/docker/validator-cdn-local.Dockerfile b/docker/validator-cdn-local.Dockerfile index a9e7dc2fe7..156fc50571 100644 --- a/docker/validator-cdn-local.Dockerfile +++ b/docker/validator-cdn-local.Dockerfile @@ -4,9 +4,8 @@ RUN apt-get update \ && apt-get install -y curl libcurl4 wait-for-it tini \ && rm -rf /var/lib/apt/lists/* -ARG ASYNC_EXECUTOR=async-std -COPY --chmod=0755 ./target/${ASYNC_EXECUTOR}/release-lto/examples/validator-push-cdn /usr/local/bin/validator-push-cdn +COPY --chmod=0755 ./target/release-lto/examples/validator-push-cdn /usr/local/bin/validator-push-cdn # logging ENV RUST_LOG="warn" diff --git a/docs/diagrams/event_discriptions/QuorumProposalRecv.md b/docs/diagrams/event_discriptions/QuorumProposalRecv.md index 4ef1fb287b..4497ad0c18 100644 --- a/docs/diagrams/event_discriptions/QuorumProposalRecv.md +++ b/docs/diagrams/event_discriptions/QuorumProposalRecv.md @@ -15,7 +15,7 @@ * If `ViewSyncCommitCertificate`s are not included in the `QuorumProposal` it is possible for honest nodes to reject valid proposals. For example, a view leader may receive the `ViewSyncCommitCertificate` from the view sync relay, update its local view, and send a `QuorumProposal` for the new view. But other nodes may not have received the `ViewSyncCommitCertificate` yet (either through genuine network delays or Byzantine behavior by the view sync relay). Thus, the other nodes will not be able to process the `QuorumProposal` since they do not have evidence the network has advanced to the view in the `ViewSyncCommitCertificate`. These nodes would either need to reject the proposal outright, or wait to process the proposal until they receive the `ViewSyncCommitCertificate`. The former behavior would cause unnecessary view failures, and the latter would add complexity to the code. 
Including the `ViewSyncCommitCertificate` in the `QuorumProposal` addresses both these concerns. * It is possible for a node to receive multiple `ViewSyncCommitCertificate`s: one from the view sync relay and one from the view leader. Therefore processing this certificate must be idempotent. * It is possible (and valid) for a node to receive valid view change evidence but an invalid `QuorumProposal`. There are several ways to handle this scenario, but we choose to allow nodes to update their `latest_known_view` even if other validation of the proposal fails. This helps the network maintain view synchronization in spite of invalid proposals. -* It is possible for a `QuorumProposal` to have valid view change evidence from a `TimeoutCertificate` or `ViewSyncCommitCertificate`, but to have an invalid `QurorumCertificate`. It would be valid to update the view in this case, since there exists valid view change evidence. But for simplicity, we choose not to. If view change evidence is valid but the `QuorumCertificate` associated with the proposal is invalid nodes will not update their view. +* It is possible for a `QuorumProposal` to have valid view change evidence from a `TimeoutCertificate` or `ViewSyncCommitCertificate`, but to have an invalid `QuorumCertificate`. It would be valid to update the view in this case, since there exists valid view change evidence. But for simplicity, we choose not to. If view change evidence is valid but the `QuorumCertificate` associated with the proposal is invalid nodes will not update their view. ## Proposal Validation * This proposal validation represents the [HotStuff-2](https://eprint.iacr.org/2023/397.pdf) protocol. 
diff --git a/flake.lock b/flake.lock index 1bc6b5b40a..a5468591dd 100644 --- a/flake.lock +++ b/flake.lock @@ -41,11 +41,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1729924178, - "narHash": "sha256-ZhDqOYZwx0kYg1vPrmZ2fJm/wem739eNSSK+GlzdeqA=", + "lastModified": 1731133750, + "narHash": "sha256-gZ3m8e176ai+akwiayg7Mve73rcUCD0+l6OMMFhGEYI=", "owner": "nix-community", "repo": "fenix", - "rev": "e831b4d256526cc56bd37c7c579842866410bebc", + "rev": "87e4581cdfecbac602220fe76c67b37d2d1ee995", "type": "github" }, "original": { @@ -72,11 +72,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1729850857, - "narHash": "sha256-WvLXzNNnnw+qpFOmgaM3JUlNEH+T4s22b5i2oyyCpXE=", + "lastModified": 1730958623, + "narHash": "sha256-JwQZIGSYnRNOgDDoIgqKITrPVil+RMWHsZH1eE1VGN0=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "41dea55321e5a999b17033296ac05fe8a8b5a257", + "rev": "85f7e662eda4fa3a995556527c87b2524b691933", "type": "github" }, "original": { @@ -99,11 +99,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1729845655, - "narHash": "sha256-6I3gJLnOLnUIWFUlEnvC0FdzX8Xwu+y3Vo0q4VB6Wbk=", + "lastModified": 1731056261, + "narHash": "sha256-TPeXChHVcaCBAoE349K7OZH4We5/2ys1GgG4IiwjwOs=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "f4466718b838de706d74e2c13f20a41c034d87a5", + "rev": "dd9cd22514cb1001a0a2374b36a85eb75245f27b", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index d67b176017..f886824290 100644 --- a/flake.nix +++ b/flake.nix @@ -47,7 +47,7 @@ ]) (fenix.packages.${system}.fromToolchainFile { dir = ./.; - sha256 = "sha256-opUgs6ckUQCyDxcB9Wy51pqhd0MPGHUVbwRKKPGiwZU="; + sha256 = "sha256-yMuSb5eQPO/bHv+Bcf/US8LVMbf/G/0MSfiPwBhiPpk="; }) ]; # needed for compiling static binary diff --git a/justfile b/justfile index c067269f75..12ee669e29 100644 --- a/justfile +++ b/justfile @@ -2,36 +2,8 @@ default: run_ci set export -original_rustflags := env_var_or_default('RUSTFLAGS', '--cfg 
hotshot_example') -original_rustdocflags := env_var_or_default('RUSTDOCFLAGS', '--cfg hotshot_example') -original_target_dir := env_var_or_default('CARGO_TARGET_DIR', 'target') - run_ci: lint build test -async := "async-std" - -# Run arbitrary cargo commands, with e.g. -# just async=async-std cargo check -# or -# just async=tokio cargo test --tests test_consensus_task -# Defaults to async-std. - -@cargo *ARGS: - echo setting async executor to {{async}} - export RUSTDOCFLAGS='-D warnings --cfg async_executor_impl="{{async}}" --cfg async_channel_impl="{{async}}" {{original_rustdocflags}}' RUSTFLAGS='--cfg async_executor_impl="{{async}}" --cfg async_channel_impl="{{async}}" {{original_rustflags}}' CARGO_TARGET_DIR='{{original_target_dir}}/{{async}}' && cargo {{ARGS}} - -@tokio target *ARGS: - echo setting executor to tokio - export RUSTDOCFLAGS='-D warnings --cfg async_executor_impl="tokio" --cfg async_channel_impl="tokio" {{original_rustdocflags}}' RUSTFLAGS='--cfg async_executor_impl="tokio" --cfg async_channel_impl="tokio" {{original_rustflags}}' CARGO_TARGET_DIR='{{original_target_dir}}/tokio' && just {{target}} {{ARGS}} - -@async_std target *ARGS: - echo setting executor to async-std - export RUST_MIN_STACK=4194304 RUSTDOCFLAGS='-D warnings --cfg async_executor_impl="async-std" --cfg async_channel_impl="async-std" {{original_rustdocflags}}' RUSTFLAGS='--cfg async_executor_impl="async-std" --cfg async_channel_impl="async-std" {{original_rustflags}}' CARGO_TARGET_DIR='{{original_target_dir}}/async-std' && just {{target}} {{ARGS}} - -@async-std target *ARGS: - echo setting executor to async-std - export RUST_MIN_STACK=4194304 RUSTDOCFLAGS='-D warnings --cfg async_executor_impl="async-std" --cfg async_channel_impl="async-std" {{original_rustdocflags}}' RUSTFLAGS='--cfg async_executor_impl="async-std" --cfg async_channel_impl="async-std" {{original_rustflags}}' CARGO_TARGET_DIR='{{original_target_dir}}/async-std' && just {{target}} {{ARGS}} - build: cargo build 
--workspace --examples --bins --tests --lib --benches @@ -82,7 +54,7 @@ test-ci-5 *ARGS: test_basic: test_success test_with_failures test_network_task test_consensus_task test_da_task test_vid_task test_view_sync_task test_catchup: - echo Testing with async std executor + echo Testing catchup cargo test --lib --bins --tests --benches --workspace --no-fail-fast test_catchup -- --test-threads=1 --nocapture test_crypto: @@ -210,7 +182,7 @@ fix: cargo fix --allow-dirty --allow-staged --workspace --lib --bins --tests --benches doc: - echo Generating docs {{env_var('RUSTFLAGS')}} + echo Generating docs cargo doc --no-deps --bins --examples --lib -p 'hotshot-types' cargo doc --no-deps --workspace --document-private-items --bins --examples --lib diff --git a/pull_request_template.md b/pull_request_template.md index 3da8f2090c..20fbfc132f 100644 --- a/pull_request_template.md +++ b/pull_request_template.md @@ -20,7 +20,7 @@ Closes # - +