diff --git a/.env b/.env index 0f3b7cf39..e772ca023 100644 --- a/.env +++ b/.env @@ -43,6 +43,8 @@ ESPRESSO_SEQUENCER_ETH_MNEMONIC="test test test test test test test test test te ESPRESSO_COMMITMENT_TASK_PORT=30010 ESPRESSO_SEQUENCER0_DB_PORT=5432 ESPRESSO_SEQUENCER1_DB_PORT=5433 +MARKETPLACE_SOLVER_POSTGRES_PORT=5434 +ESPRESSO_MARKETPLACE_SOLVER_API_PORT=25000 ESPRESSO_STATE_RELAY_SERVER_PORT=30011 ESPRESSO_STATE_RELAY_SERVER_URL=http://state-relay-server:${ESPRESSO_STATE_RELAY_SERVER_PORT} ESPRESSO_BLOCK_EXPLORER_PORT=3000 diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 49b9f68a7..e4fcca432 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -80,6 +80,7 @@ jobs: target/release/espresso-dev-node target/release/pub-key target/release/espresso-bridge + target/release/marketplace-solver build-arm: runs-on: buildjet-4vcpu-ubuntu-2204-arm @@ -131,6 +132,7 @@ jobs: target/release/espresso-dev-node target/release/pub-key target/release/espresso-bridge + target/release/marketplace-solver build-dockers: runs-on: ubuntu-latest @@ -150,6 +152,7 @@ jobs: nasty-client-tag: ${{ steps.nasty-client.outputs.tags }} espresso-dev-node-tag: ${{ steps.espresso-dev-node.outputs.tags }} bridge-tag: ${{ steps.bridge.outputs.tags }} + marketplace-solver-tag: ${{ steps.marketplace-solver.outputs.tags }} steps: - name: Checkout Repository uses: actions/checkout@v4 @@ -263,6 +266,12 @@ jobs: with: images: ghcr.io/espressosystems/espresso-sequencer/bridge + - name: Generate marketplace-solver metadata + uses: docker/metadata-action@v5 + id: marketplace-solver + with: + images: ghcr.io/espressosystems/espresso-sequencer/marketplace-solver + - name: Build and push sequencer docker uses: docker/build-push-action@v6 with: @@ -402,6 +411,16 @@ jobs: tags: ${{ steps.bridge.outputs.tags }} labels: ${{ steps.bridge.outputs.labels }} + - name: Build and push marketplace-solver docker + uses: docker/build-push-action@v6 + with: + context: ./ + 
file: ./docker/marketplace-solver.Dockerfile + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.marketplace-solver.outputs.tags }} + labels: ${{ steps.marketplace-solver.outputs.labels }} + test-demo: if: ${{ github.event_name != 'pull_request' }} runs-on: ubuntu-latest @@ -430,6 +449,8 @@ jobs: docker pull ${{ needs.build-dockers.outputs.builder-tag }} docker pull ${{ needs.build-dockers.outputs.nasty-client-tag }} docker pull ${{ needs.build-dockers.outputs.bridge-tag }} + docker pull ${{ needs.build-dockers.outputs.marketplace-solver-tag }} + - name: Tag new docker images run: | docker tag ${{ needs.build-dockers.outputs.sequencer-tag }} ghcr.io/espressosystems/espresso-sequencer/sequencer:main @@ -445,6 +466,7 @@ jobs: docker tag ${{ needs.build-dockers.outputs.builder-tag }} ghcr.io/espressosystems/espresso-sequencer/builder:main docker tag ${{ needs.build-dockers.outputs.nasty-client-tag }} ghcr.io/espressosystems/espresso-sequencer/nasty-client:main docker tag ${{ needs.build-dockers.outputs.bridge-tag }} ghcr.io/espressosystems/espresso-sequencer/bridge:main + docker tag ${{ needs.build-dockers.outputs.marketplace-solver-tag }} ghcr.io/espressosystems/espresso-sequencer/marketplace-solver:main - name: Test docker demo run: | diff --git a/.github/workflows/build_static.yml b/.github/workflows/build_static.yml index 0d533fd38..73187114b 100644 --- a/.github/workflows/build_static.yml +++ b/.github/workflows/build_static.yml @@ -91,6 +91,7 @@ jobs: ${{ env.CARGO_TARGET_DIR }}/${{ env.TARGET_TRIPLET }}/release/pub-key ${{ env.CARGO_TARGET_DIR }}/${{ env.TARGET_TRIPLET }}/release/espresso-bridge ${{ env.CARGO_TARGET_DIR }}/${{ env.TARGET_TRIPLET }}/release/espresso-dev-node + ${{ env.CARGO_TARGET_DIR }}/${{ env.TARGET_TRIPLET }}/release/marketplace-solver static-dockers: runs-on: ubuntu-latest @@ -208,6 +209,13 @@ jobs: images: ghcr.io/espressosystems/espresso-sequencer/bridge flavor: suffix=musl + - name: 
Generate marketplace-solver metadata + uses: docker/metadata-action@v5 + id: marketplace-solver + with: + images: ghcr.io/espressosystems/espresso-sequencer/marketplace-solver + flavor: suffix=musl + - name: Build and push sequencer docker uses: docker/build-push-action@v6 with: @@ -327,3 +335,13 @@ jobs: push: ${{ github.event_name != 'pull_request' }} tags: ${{ steps.bridge.outputs.tags }} labels: ${{ steps.bridge.outputs.labels }} + + - name: Build and push marketplace-solver docker + uses: docker/build-push-action@v6 + with: + context: ./ + file: ./docker/marketplace-solver.Dockerfile + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.marketplace-solver.outputs.tags }} + labels: ${{ steps.marketplace-solver.outputs.labels }} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 44bf2c4fe..b4a98898d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1304,7 +1304,7 @@ dependencies = [ "committable", "dotenvy", "es-version", - "espresso-types", + "espresso-types 0.1.0", "ethers", "futures", "hotshot", @@ -1322,6 +1322,7 @@ dependencies = [ "portpicker", "rand 0.8.5", "sequencer", + "sequencer-utils 0.1.0", "serde", "snafu 0.8.4", "surf", @@ -2016,6 +2017,15 @@ dependencies = [ "serde", ] +[[package]] +name = "contract-bindings" +version = "0.1.0" +source = "git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main#6d52a7e331a5517d9c19ea459ee18947f6c567cc" +dependencies = [ + "ethers", + "serde", +] + [[package]] name = "convert_case" version = "0.4.0" @@ -2558,7 +2568,7 @@ dependencies = [ "clap", "diff-test-bn254", "ethers", - "hotshot-contract-adapter", + "hotshot-contract-adapter 0.1.0", "hotshot-state-prover", "itertools 0.12.1", "jf-pcs", @@ -2916,7 +2926,59 @@ dependencies = [ "clap", "cld", "committable", - "contract-bindings", + "contract-bindings 0.1.0", + "derive_more", + "es-version", + "ethers", + "fluent-asserter", + "futures", + "hotshot", + "hotshot-orchestrator", + 
"hotshot-query-service", + "hotshot-testing", + "hotshot-types", + "itertools 0.12.1", + "jf-merkle-tree", + "jf-utils", + "jf-vid", + "num-traits", + "paste", + "pretty_assertions", + "rand 0.8.5", + "sequencer-utils 0.1.0", + "serde", + "serde_json", + "sha2 0.10.8", + "snafu 0.8.4", + "static_assertions", + "surf-disco", + "tagged-base64", + "thiserror", + "tide-disco", + "time 0.3.36", + "tracing", + "url", + "vbs", +] + +[[package]] +name = "espresso-types" +version = "0.1.0" +source = "git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main#6d52a7e331a5517d9c19ea459ee18947f6c567cc" +dependencies = [ + "anyhow", + "ark-serialize", + "async-compatibility-layer", + "async-std", + "async-trait", + "base64-bytes", + "bincode", + "blake3", + "bytesize", + "clap", + "cld", + "committable", + "contract-bindings 0.1.0 (git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main)", "derive_more", "es-version", "ethers", @@ -2935,14 +2997,16 @@ dependencies = [ "paste", "pretty_assertions", "rand 0.8.5", - "sequencer-utils", + "sequencer-utils 0.1.0 (git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main)", "serde", "serde_json", "sha2 0.10.8", "snafu 0.8.4", "static_assertions", + "surf-disco", "tagged-base64", "thiserror", + "tide-disco", "time 0.3.36", "tracing", "url", @@ -3666,7 +3730,7 @@ name = "gen-vk-contract" version = "0.1.0" dependencies = [ "ark-srs", - "hotshot-contract-adapter", + "hotshot-contract-adapter 0.1.0", "hotshot-stake-table", "hotshot-state-prover", "jf-pcs", @@ -4046,8 +4110,8 @@ dependencies = [ [[package]] name = "hotshot" -version = "0.5.63" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "anyhow", "async-broadcast", @@ -4093,7 +4157,7 @@ dependencies = [ [[package]] name = 
"hotshot-builder-api" version = "0.1.7" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "async-trait", "clap", @@ -4111,8 +4175,8 @@ dependencies = [ [[package]] name = "hotshot-builder-core" -version = "0.1.26" -source = "git+https://github.com/EspressoSystems/hotshot-builder-core?tag=rc-0.1.36#7499ae36f022de5b9e4b997a2eebf4a6575808c5" +version = "0.1.38" +source = "git+https://github.com/EspressoSystems/hotshot-builder-core?tag=0.1.38#2a698f56ec3cccb367ce9975d1bbcfd2a377bc8a" dependencies = [ "anyhow", "async-broadcast", @@ -4148,7 +4212,28 @@ dependencies = [ "ark-ff", "ark-poly", "ark-std", - "contract-bindings", + "contract-bindings 0.1.0", + "diff-test-bn254", + "ethers", + "hotshot-types", + "jf-pcs", + "jf-plonk", + "jf-utils", + "num-bigint", + "num-traits", +] + +[[package]] +name = "hotshot-contract-adapter" +version = "0.1.0" +source = "git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main#6d52a7e331a5517d9c19ea459ee18947f6c567cc" +dependencies = [ + "anyhow", + "ark-bn254", + "ark-ff", + "ark-poly", + "ark-std", + "contract-bindings 0.1.0 (git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main)", "diff-test-bn254", "ethers", "hotshot-types", @@ -4161,8 +4246,8 @@ dependencies = [ [[package]] name = "hotshot-events-service" -version = "0.1.33" -source = "git+https://github.com/EspressoSystems/hotshot-events-service.git?tag=rc-0.1.35#9cb15d86d5dae9e5b401f373680a4187dec825e9" +version = "0.1.37" +source = "git+https://github.com/EspressoSystems/hotshot-events-service.git?tag=0.1.37#31ec74a628160714eb849d6eb5954a3c0ea9fbd3" dependencies = [ "async-broadcast", "async-compatibility-layer", @@ -4187,8 +4272,8 @@ dependencies = [ [[package]] name = "hotshot-example-types" -version = "0.5.63" -source = 
"git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "anyhow", "async-broadcast", @@ -4220,8 +4305,8 @@ dependencies = [ [[package]] name = "hotshot-fakeapi" -version = "0.5.63" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "anyhow", "async-lock 2.8.0", @@ -4239,8 +4324,8 @@ dependencies = [ [[package]] name = "hotshot-macros" -version = "0.5.63" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "derive_builder", "proc-macro2", @@ -4250,8 +4335,8 @@ dependencies = [ [[package]] name = "hotshot-orchestrator" -version = "0.5.63" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "anyhow", "async-compatibility-layer", @@ -4280,8 +4365,8 @@ dependencies = [ [[package]] name = "hotshot-query-service" -version = "0.1.45" -source = "git+https://github.com/EspressoSystems/hotshot-query-service?tag=rc-0.1.46#0ebce04743f82987b60aa273657bc5f898d818f1" +version = "0.1.49" +source = "git+https://github.com/EspressoSystems/hotshot-query-service?tag=0.1.49#5ec6d4d9e8f1283ab35da84f4b92907636c31a34" dependencies = [ "anyhow", "ark-serialize", @@ -4336,8 +4421,8 @@ dependencies = [ [[package]] name = "hotshot-stake-table" -version = "0.5.63" -source = 
"git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "ark-bn254", "ark-ed-on-bn254", @@ -4370,12 +4455,12 @@ dependencies = [ "async-std", "clap", "cld", - "contract-bindings", + "contract-bindings 0.1.0", "displaydoc", "es-version", "ethers", "futures", - "hotshot-contract-adapter", + "hotshot-contract-adapter 0.1.0", "hotshot-stake-table", "hotshot-types", "itertools 0.12.1", @@ -4387,7 +4472,7 @@ dependencies = [ "jf-signature", "jf-utils", "reqwest 0.12.5", - "sequencer-utils", + "sequencer-utils 0.1.0", "serde", "snafu 0.8.4", "surf-disco", @@ -4401,8 +4486,8 @@ dependencies = [ [[package]] name = "hotshot-task" -version = "0.5.63" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "anyhow", "async-broadcast", @@ -4416,8 +4501,8 @@ dependencies = [ [[package]] name = "hotshot-task-impls" -version = "0.5.63" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "anyhow", "async-broadcast", @@ -4452,8 +4537,8 @@ dependencies = [ [[package]] name = "hotshot-testing" -version = "0.5.63" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "anyhow", "async-broadcast", @@ -4498,7 +4583,7 @@ dependencies = [ [[package]] name = "hotshot-types" version = 
"0.1.11" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "anyhow", "ark-bn254", @@ -4534,6 +4619,7 @@ dependencies = [ "memoize", "rand 0.8.5", "rand_chacha 0.3.1", + "reqwest 0.12.5", "serde", "sha2 0.10.8", "snafu 0.8.4", @@ -5742,8 +5828,8 @@ dependencies = [ [[package]] name = "libp2p-networking" -version = "0.5.63" -source = "git+https://github.com/EspressoSystems/hotshot?tag=rc-0.5.64#8a84c2760bb91495be387c8ecbcf8ae400457807" +version = "0.5.67" +source = "git+https://github.com/EspressoSystems/hotshot?tag=0.5.67#a8115816f131bf56b6689937dc8029871bbe8476" dependencies = [ "anyhow", "async-compatibility-layer", @@ -6039,6 +6125,16 @@ dependencies = [ "value-bag", ] +[[package]] +name = "log-panics" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f9dd8546191c1850ecf67d22f5ff00a935b890d0e84713159a55495cc2ac5f" +dependencies = [ + "backtrace", + "log", +] + [[package]] name = "lru" version = "0.7.8" @@ -6077,6 +6173,37 @@ dependencies = [ "regex", ] +[[package]] +name = "marketplace-solver" +version = "0.1.0" +source = "git+https://github.com/EspressoSystems/marketplace-solver.git?tag=0.1.2#11f4e09f5fbf70f825230d37ec3c03d199700149" +dependencies = [ + "anyhow", + "async-compatibility-layer", + "async-std", + "async-trait", + "bincode", + "clap", + "cld", + "committable", + "espresso-types 0.1.0 (git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main)", + "futures", + "hotshot", + "hotshot-events-service", + "hotshot-types", + "jf-signature", + "rand 0.8.5", + "serde", + "serde_json", + "sqlx", + "surf-disco", + "thiserror", + "tide-disco", + "toml", + "tracing", + "vbs", +] + [[package]] name = "match_cfg" version = "0.1.0" @@ -8372,7 +8499,7 @@ dependencies = [ "clap", "cld", "committable", - 
"contract-bindings", + "contract-bindings 0.1.0", "csv", "derivative", "derive_more", @@ -8380,11 +8507,11 @@ dependencies = [ "es-version", "escargot", "espresso-macros", - "espresso-types", + "espresso-types 0.1.0", "ethers", "futures", "hotshot", - "hotshot-contract-adapter", + "hotshot-contract-adapter 0.1.0", "hotshot-events-service", "hotshot-example-types", "hotshot-orchestrator", @@ -8401,6 +8528,8 @@ dependencies = [ "jf-signature", "jf-vid", "libp2p", + "marketplace-solver", + "num-traits", "num_enum", "portpicker", "pretty_assertions", @@ -8408,7 +8537,7 @@ dependencies = [ "rand_chacha 0.3.1", "rand_distr", "reqwest 0.12.5", - "sequencer-utils", + "sequencer-utils 0.1.0", "serde", "serde_json", "sha2 0.10.8", @@ -8435,14 +8564,42 @@ version = "0.1.0" dependencies = [ "anyhow", "ark-serialize", + "async-compatibility-layer", + "async-std", + "clap", + "committable", + "contract-bindings 0.1.0", + "derive_more", + "ethers", + "futures", + "hotshot-contract-adapter 0.1.0", + "log-panics", + "portpicker", + "serde", + "serde_json", + "surf", + "tempfile", + "tracing", + "url", +] + +[[package]] +name = "sequencer-utils" +version = "0.1.0" +source = "git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main#6d52a7e331a5517d9c19ea459ee18947f6c567cc" +dependencies = [ + "anyhow", + "ark-serialize", + "async-compatibility-layer", "async-std", "clap", "committable", - "contract-bindings", + "contract-bindings 0.1.0 (git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main)", "derive_more", "ethers", "futures", - "hotshot-contract-adapter", + "hotshot-contract-adapter 0.1.0 (git+https://github.com/EspressoSystems/espresso-sequencer.git?branch=main)", + "log-panics", "portpicker", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index 98296abaa..ee1c4118e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,15 +8,15 @@ edition = "2021" resolver = "2" members = [ - "builder", - "contract-bindings", - "contracts/rust/adapter", - 
"contracts/rust/diff-test", - "contracts/rust/gen-vk-contract", - "hotshot-state-prover", - "sequencer", - "types", - "utils", + "builder", + "contract-bindings", + "contracts/rust/adapter", + "contracts/rust/diff-test", + "contracts/rust/gen-vk-contract", + "hotshot-state-prover", + "sequencer", + "types", + "utils", ] [workspace.dependencies] @@ -31,7 +31,7 @@ ark-poly = "0.4" ark-serialize = "0.4" ark-srs = "0.3.1" async-compatibility-layer = { version = "1.1", default-features = false, features = [ - "logging-utils", + "logging-utils", ] } async-once-cell = "0.5" async-std = { version = "1.12.0", features = ["attributes", "tokio1"] } @@ -48,59 +48,61 @@ dotenvy = "0.15" ethers = { version = "2.0", features = ["solc"] } futures = "0.3" -hotshot = { git = "https://github.com/EspressoSystems/hotshot", tag = "rc-0.5.64" } +hotshot = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.67" } # Hotshot imports -hotshot-builder-api = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "rc-0.5.64" } -hotshot-builder-core = { git = "https://github.com/EspressoSystems/hotshot-builder-core", tag = "rc-0.1.36" } -hotshot-events-service = { git = "https://github.com/EspressoSystems/hotshot-events-service.git", tag = "rc-0.1.35" } -hotshot-orchestrator = { git = "https://github.com/EspressoSystems/hotshot", tag = "rc-0.5.64" } -hotshot-query-service = { git = "https://github.com/EspressoSystems/hotshot-query-service", tag = "rc-0.1.46" } -hotshot-stake-table = { git = "https://github.com/EspressoSystems/hotshot", tag = "rc-0.5.64" } +hotshot-builder-api = { git = "https://github.com/EspressoSystems/HotShot.git", tag = "0.5.67" } +hotshot-builder-core = { git = "https://github.com/EspressoSystems/hotshot-builder-core", tag = "0.1.38" } +marketplace-builder-core = { git = "https://github.com/EspressoSystems/marketplace-builder-core", rev = "c1e60e76893557f21e8aea7e86994ebe107b4914" } +hotshot-events-service = { git = 
"https://github.com/EspressoSystems/hotshot-events-service.git", tag = "0.1.37" } +hotshot-orchestrator = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.67" } +hotshot-query-service = { git = "https://github.com/EspressoSystems/hotshot-query-service", tag = "0.1.49" } +hotshot-stake-table = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.67" } hotshot-state-prover = { version = "0.1.0", path = "hotshot-state-prover" } -hotshot-task = { git = "https://github.com/EspressoSystems/hotshot", tag = "rc-0.5.64" } -hotshot-testing = { git = "https://github.com/EspressoSystems/hotshot", tag = "rc-0.5.64" } -hotshot-types = { git = "https://github.com/EspressoSystems/hotshot", tag = "rc-0.5.64" } +hotshot-task = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.67" } +hotshot-testing = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.67" } +hotshot-types = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.67" } hotshot-contract-adapter = { version = "0.1.0", path = "contracts/rust/adapter" } # Temporary, used to pull in the mock auction results provider -hotshot-example-types = { git = "https://github.com/EspressoSystems/hotshot", tag = "rc-0.5.64" } +hotshot-example-types = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.67" } # Push CDN imports cdn-broker = { git = "https://github.com/EspressoSystems/Push-CDN", features = [ - "runtime-async-std", - "global-permits", + "runtime-async-std", + "global-permits", ], tag = "0.4.3", package = "cdn-broker" } cdn-marshal = { git = "https://github.com/EspressoSystems/Push-CDN", features = [ - "runtime-async-std", - "global-permits", + "runtime-async-std", + "global-permits", ], tag = "0.4.3", package = "cdn-marshal" } jf-plonk = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = [ - "test-apis", + "test-apis", ] } jf-crhf = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = 
"0.4.5" } jf-merkle-tree = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = [ - "std", + "std", ] } jf-signature = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = [ - "std", + "std", ] } jf-pcs = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = [ - "std", - "parallel", + "std", + "parallel", ] } jf-vid = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = [ - "std", - "parallel", + "std", + "parallel", ] } jf-rescue = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = [ - "std", - "parallel", + "std", + "parallel", ] } jf-relation = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = [ - "std", + "std", ] } jf-utils = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } libp2p = { version = "0.53", default-features = false } +log-panics = { version = "2.0", features = ["with-backtrace"] } snafu = "0.8" strum = { version = "0.26", features = ["derive"] } surf-disco = "0.9" diff --git a/builder/Cargo.toml b/builder/Cargo.toml index ed7815333..ccf463ed7 100644 --- a/builder/Cargo.toml +++ b/builder/Cargo.toml @@ -39,6 +39,7 @@ libp2p = { workspace = true } portpicker = { workspace = true } rand = "0.8.5" sequencer = { path = "../sequencer", features = ["testing"] } +sequencer-utils = { path = "../utils" } serde = { workspace = true } snafu = { workspace = true } surf = "2.3.1" diff --git a/builder/src/bin/permissioned-builder.rs b/builder/src/bin/permissioned-builder.rs index b76d02b24..093191730 100644 --- a/builder/src/bin/permissioned-builder.rs +++ b/builder/src/bin/permissioned-builder.rs @@ -3,7 +3,6 @@ use std::{ }; use anyhow::{bail, Context}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use builder::permissioned::init_node; use 
clap::Parser; use es_version::SEQUENCER_VERSION; @@ -19,6 +18,7 @@ use libp2p::Multiaddr; use sequencer::{ options::parse_duration, persistence::no_storage::NoStorage, Genesis, L1Params, NetworkParams, }; +use sequencer_utils::logging; use url::Url; #[derive(Parser, Clone, Debug)] @@ -188,6 +188,9 @@ pub struct PermissionedBuilderOptions { /// Whether or not we are a DA node. #[clap(long, env = "ESPRESSO_SEQUENCER_IS_DA", action)] pub is_da: bool, + + #[clap(flatten)] + logging: logging::Config, } impl PermissionedBuilderOptions { @@ -215,10 +218,8 @@ impl PermissionedBuilderOptions { } #[async_std::main] async fn main() -> anyhow::Result<()> { - setup_logging(); - setup_backtrace(); - let opt = PermissionedBuilderOptions::parse(); + opt.logging.init(); let (private_staking_key, private_state_key) = opt.private_keys()?; diff --git a/builder/src/bin/permissionless-builder.rs b/builder/src/bin/permissionless-builder.rs index 0c250eae2..45326662a 100644 --- a/builder/src/bin/permissionless-builder.rs +++ b/builder/src/bin/permissionless-builder.rs @@ -1,6 +1,5 @@ use std::{num::NonZeroUsize, path::PathBuf, str::FromStr, time::Duration}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use builder::non_permissioned::{build_instance_state, BuilderConfig}; use clap::Parser; use cld::ClDuration; @@ -9,6 +8,7 @@ use hotshot::traits::ValidatedState; use hotshot_builder_core::testing::basic_test::NodeType; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use sequencer::{Genesis, L1Params}; +use sequencer_utils::logging; use snafu::Snafu; use url::Url; use vbs::version::StaticVersionType; @@ -84,6 +84,9 @@ struct NonPermissionedBuilderOptions { /// Path to TOML file containing genesis state. 
#[clap(long, name = "GENESIS_FILE", env = "ESPRESSO_BUILDER_GENESIS_FILE")] genesis_file: PathBuf, + + #[clap(flatten)] + logging: logging::Config, } #[derive(Clone, Debug, Snafu)] @@ -101,10 +104,9 @@ fn parse_duration(s: &str) -> Result { #[async_std::main] async fn main() -> anyhow::Result<()> { - setup_logging(); - setup_backtrace(); - let opt = NonPermissionedBuilderOptions::parse(); + opt.logging.init(); + let genesis = Genesis::from_file(&opt.genesis_file)?; let l1_params = L1Params { diff --git a/builder/src/lib.rs b/builder/src/lib.rs old mode 100644 new mode 100755 index 4bd5d48d1..310b07b10 --- a/builder/src/lib.rs +++ b/builder/src/lib.rs @@ -119,7 +119,7 @@ pub mod testing { use async_trait::async_trait; use committable::Committable; use espresso_types::{ - mock::MockStateCatchup, ChainConfig, Event, FeeAccount, L1Client, NodeState, PrivKey, + mock::MockStateCatchup, v0_3::ChainConfig, Event, FeeAccount, L1Client, NodeState, PrivKey, PubKey, Transaction, ValidatedState, }; use ethers::{ @@ -155,6 +155,7 @@ pub mod testing { traits::{ block_contents::{vid_commitment, BlockHeader, GENESIS_VID_NUM_STORAGE_NODES}, metrics::NoMetrics, + network::Topic, node_implementation::ConsensusTime, signature_key::BuilderSignatureKey as _, }, @@ -393,8 +394,9 @@ pub mod testing { } let network = Arc::new(MemoryNetwork::new( - config.my_own_validator_config.public_key, + &config.my_own_validator_config.public_key, &self.master_map, + &[Topic::Global, Topic::Da], None, )); @@ -666,10 +668,6 @@ pub mod testing { #[cfg(test)] mod test { - //use self::testing::mock_node_state; - - //use super::{transaction::ApplicationTransaction, vm::TestVm, *}; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::stream::IntoStream; use clap::builder; use es_version::SequencerVersion; @@ -686,6 +684,7 @@ mod test { sql, }, }; + use sequencer_utils::test_utils::setup_test; use testing::{wait_for_decide_on_handle, HotShotTestConfig}; use super::*; @@ 
-696,8 +695,7 @@ mod test { #[ignore] #[async_std::test] async fn test_non_voting_hotshot_node() { - setup_logging(); - setup_backtrace(); + setup_test(); let ver = SequencerVersion::instance(); diff --git a/builder/src/non_permissioned.rs b/builder/src/non_permissioned.rs index ac4ee4271..d1c7d076e 100644 --- a/builder/src/non_permissioned.rs +++ b/builder/src/non_permissioned.rs @@ -10,8 +10,8 @@ use async_compatibility_layer::{ }; use async_std::sync::{Arc, RwLock}; use espresso_types::{ - eth_signature_key::EthKeyPair, ChainConfig, FeeAmount, L1Client, NodeState, Payload, SeqTypes, - ValidatedState, + eth_signature_key::EthKeyPair, v0_3::ChainConfig, FeeAmount, L1Client, NodeState, Payload, + SeqTypes, ValidatedState, }; use ethers::{ core::k256::ecdsa::SigningKey, @@ -255,6 +255,7 @@ mod test { }, }; use sequencer::persistence::no_storage::{self, NoStorage}; + use sequencer_utils::test_utils::setup_test; use surf_disco::Client; use super::*; @@ -267,8 +268,7 @@ mod test { /// Builder subscrived to this api, and server the hotshot client request and the private mempool tx submission #[async_std::test] async fn test_non_permissioned_builder() { - setup_logging(); - setup_backtrace(); + setup_test(); let ver = SequencerVersion::instance(); // Hotshot Test Config diff --git a/builder/src/permissioned.rs b/builder/src/permissioned.rs index cd6bc949e..520f45f33 100644 --- a/builder/src/permissioned.rs +++ b/builder/src/permissioned.rs @@ -40,8 +40,8 @@ use hotshot::{ traits::{ election::static_committee::GeneralStaticCommittee, implementations::{ - derive_libp2p_peer_id, CdnMetricsValue, CombinedNetworks, KeyPair, Libp2pNetwork, - PushCdnNetwork, Topic, WrappedSignatureKey, + derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, CombinedNetworks, KeyPair, + Libp2pNetwork, PushCdnNetwork, WrappedSignatureKey, }, BlockPayload, }, @@ -84,7 +84,7 @@ use hotshot_types::{ block_contents::{vid_commitment, GENESIS_VID_NUM_STORAGE_NODES}, election::Membership, metrics::Metrics, 
- network::ConnectedNetwork, + network::{ConnectedNetwork, Topic}, node_implementation::{ConsensusTime, NodeType}, EncodeBytes, }, @@ -213,7 +213,7 @@ pub async fn init_node, P: SequencerPersistence, Ver: StaticVersionTyp mod test { use std::time::Duration; - use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - logging::{setup_backtrace, setup_logging}, - }; + use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; use async_std::task; use es_version::SequencerVersion; @@ -575,6 +579,7 @@ mod test { }, }; use sequencer::persistence::no_storage::{self, NoStorage}; + use sequencer_utils::test_utils::setup_test; use surf_disco::Client; use super::*; @@ -588,8 +593,7 @@ mod test { #[async_std::test] async fn test_permissioned_builder() { - setup_logging(); - setup_backtrace(); + setup_test(); let ver = SequencerVersion::instance(); diff --git a/data/README.md b/data/README.md index d7f0fd598..674864b6c 100644 --- a/data/README.md +++ b/data/README.md @@ -6,10 +6,10 @@ commitments. The objects in this directory have well-known commitments. They ser by the Espresso Sequencer, and can be used as test cases for ports of the serialization and commitment algorithms to other languages. -The Rust module `espresso-types::reference_tests` contains test cases which are designed to fail if the serialization format -or commitment scheme for any of these data types changes. If you make a breaking change, you may need to update these -reference objects as well. Running those tests will also print out information about the commitments of these reference -objects, which can be useful for generating test cases for ports. To run them and get the output, use +The Rust module `espresso-types::reference_tests` contains test cases which are designed to fail if the serialization +format or commitment scheme for any of these data types changes. If you make a breaking change, you may need to update +these reference objects as well. 
Running those tests will also print out information about the commitments of these +reference objects, which can be useful for generating test cases for ports. To run them and get the output, use ```bash cargo test --all-features -p espresso-types -- --nocapture --test-threads 1 reference_tests diff --git a/data/v2/header.bin b/data/v2/header.bin index 6faa7f07f..93174ba7a 100644 Binary files a/data/v2/header.bin and b/data/v2/header.bin differ diff --git a/data/v3/chain_config.bin b/data/v3/chain_config.bin new file mode 100644 index 000000000..92c05b086 Binary files /dev/null and b/data/v3/chain_config.bin differ diff --git a/data/v3/chain_config.json b/data/v3/chain_config.json new file mode 100644 index 000000000..5c39b238a --- /dev/null +++ b/data/v3/chain_config.json @@ -0,0 +1,8 @@ +{ + "base_fee": "0", + "bid_recipient": "0x0000000000000000000000000000000000000000", + "chain_id": "35353", + "fee_contract": "0x0000000000000000000000000000000000000000", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "max_block_size": "10240" +} \ No newline at end of file diff --git a/data/v3/header.bin b/data/v3/header.bin index 681385142..8e50a6eba 100644 Binary files a/data/v3/header.bin and b/data/v3/header.bin differ diff --git a/data/v3/header.json b/data/v3/header.json index 5e5b6cddc..2b8fa1234 100644 --- a/data/v3/header.json +++ b/data/v3/header.json @@ -1,16 +1,24 @@ { "fields": { + "auction_results": { + "reserve_bids": [], + "view_number": 0, + "winning_bids": [] + }, "block_merkle_tree_root": "MERKLE_COMM~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAQA", "builder_commitment": "BUILDER_COMMITMENT~jlEvJoHPETCSwXF6UKcD22zOjfoHGuyVFTVkP_BNc-no", - "builder_signature": { - "r": "0xa1c3795850b7b490e616b60fead89753841fbc9fffe1a939d483f1d959ad1c45", - "s": "0x20228f5b63b14792d371dce479978e45020f19602189ef6d325b73029a2848ac", - "v": 27 - }, + "builder_signature": [ + { + "r": 
"0xa1c3795850b7b490e616b60fead89753841fbc9fffe1a939d483f1d959ad1c45", + "s": "0x20228f5b63b14792d371dce479978e45020f19602189ef6d325b73029a2848ac", + "v": 27 + } + ], "chain_config": { "chain_config": { "Left": { "base_fee": "0", + "bid_recipient": "0x0000000000000000000000000000000000000000", "chain_id": "35353", "fee_contract": "0x0000000000000000000000000000000000000000", "fee_recipient": "0x0000000000000000000000000000000000000000", @@ -18,10 +26,12 @@ } } }, - "fee_info": { - "account": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", - "amount": "0" - }, + "fee_info": [ + { + "account": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "amount": "0" + } + ], "fee_merkle_tree_root": "MERKLE_COMM~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAKA", "height": 42, "l1_finalized": { diff --git a/doc/espresso-dev-node.md b/doc/espresso-dev-node.md new file mode 100644 index 000000000..8dd009027 --- /dev/null +++ b/doc/espresso-dev-node.md @@ -0,0 +1,86 @@ +# Espresso Dev Node + +Espresso dev node is a node specifically designed for development and testing. It includes various nodes required to run +a complete Espresso network, such as `builder`, `sequencer`, etc. Developers can use it for development and testing. + +## Download + +We highly recommend you to use our Docker image. You can run it from the command line: + +```cmd +docker run ghcr.io/espressosystems/espresso-sequencer/espresso-dev-node:main +``` + +## Parameters + +| Name | Type | Environment Variable | Default Value | Description | +| ------------------------------- | --------------- | ------------------------------------ | ------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | +| `rpc_url` | `Option` | `ESPRESSO_SEQUENCER_L1_PROVIDER` | Automatically launched Avil node if not provided. | The JSON-RPC endpoint of the L1. 
If not provided, an Avil node will be launched automatically. | +| `mnemonic` | `String` | `ESPRESSO_SEQUENCER_ETH_MNEMONIC` | `test test test test test test test test test test test junk` | Mnemonic for an L1 wallet. This wallet is used to deploy the contracts, so the account indicated by `ACCOUNT_INDEX` must be funded with ETH. | +| `account_index` | `u32` | `ESPRESSO_DEPLOYER_ACCOUNT_INDEX` | `0` | Account index of the L1 wallet generated from `MNEMONIC`. Used when deploying contracts. | +| `sequencer_api_port` | `u16` | `ESPRESSO_SEQUENCER_API_PORT` | Required | Port that the HTTP API will use. | +| `sequencer_api_max_connections` | `Option` | `ESPRESSO_SEQUENCER_MAX_CONNECTIONS` | None | Maximum concurrent connections allowed by the HTTP API server. | +| `builder_port` | `Option` | `ESPRESSO_BUILDER_PORT` | An unused port | Port for connecting to the builder. | +| `prover_port` | `Option` | `ESPRESSO_PROVER_PORT` | An unused port | Port for connecting to the prover. If this is not provided, an available port will be selected. | +| `dev_node_port` | `u16` | `ESPRESSO_DEV_NODE_PORT` | `20000` | Port for the dev node. This is used to provide tools and information to facilitate developers debugging. | + +## APIs + +Once you have successfully run the dev node, you can access the corresponding ports to call the APIs of the +[`builder`](https://docs.espressosys.com/sequencer/api-reference/builder-api), +[`sequencer`](https://docs.espressosys.com/sequencer/api-reference/sequencer-api), and `prover`. + +In addition, you can access the `dev_node_port` to retrieve debugging information. Here are the details of the dev node +API. + +### GET /api/dev-info + +This endpoint returns some debug information for you. 
+ +An example response is like this: + +```json +{ + "builder_url": "http://localhost:41003/", + "prover_port": 23156, + "l1_url": "http://localhost:8545/", + "light_client_address": "0xb075b82c7a23e0994df4793422a1f03dbcf9136f" +} +``` + +### POST /api/set-hotshot-down + +This endpoint simulates the effect of a liveness failure of the hotshot consensus protocol in the Light Client smart +contract. + +By calling this, the L1 height in the light contract will be frozen, and rollups will detect the HotShot failure. This +is intended for testing rollups' functionalities when HotShot is down. + +An example of a `curl` command: + +```cmd +curl -X POST "http://localhost:20000/api/set-hotshot-down" \ + -H "Content-Type: application/json" \ + -d '{"height": 12345}' +``` + +Parameters + +| Name | Type | Description | +| ------ | ------- | ---------------------------------------- | +| height | integer | The L1 height from which hotshot is down | + +### POST /api/set-hotshot-up + +This endpoint simulates the effect of a liveness success of the hotshot consensus protocol in the Light Client smart +contract. + +This is intended to be used when `set-hotshot-down` has been called previously. By calling this, rollups will detect the +reactivity of HotShot. 
+ +An example of a `curl` command: + +```cmd +curl -X POST "http://localhost:20000/api/set-hotshot-up" \ + -H "Content-Type: application/json" +``` diff --git a/docker-compose.yaml b/docker-compose.yaml index 936db67f6..381bf3900 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -524,9 +524,27 @@ services: sequencer0: condition: service_healthy + marketplace-solver: + image: ghcr.io/espressosystems/espresso-sequencer/marketplace-solver:main + ports: + - "$ESPRESSO_MARKETPLACE_SOLVER_API_PORT:$ESPRESSO_MARKETPLACE_SOLVER_API_PORT" + + environment: + - MARKETPLACE_SOLVER_POSTGRES_URL=postgresql://root:password@solver-db/solver + - ESPRESSO_MARKETPLACE_SOLVER_API_PORT + - ESPRESSO_SEQUENCER_HOTSHOT_EVENT_API_URL=http://sequencer0:$ESPRESSO_SEQUENCER_HOTSHOT_EVENT_STREAMING_API_PORT + - MARKETPLACE_SOLVER_POSTGRES_MAX_CONNECTIONS=100 + - MARKETPLACE_SOLVER_POSTGRES_ACQUIRE_TIMEOUT=5 + - RUST_LOG + depends_on: + sequencer0: + condition: service_healthy + solver-db: + condition: service_healthy + sequencer-db-0: image: postgres - user: postgres + user: root ports: - "$ESPRESSO_SEQUENCER0_DB_PORT:5432" environment: @@ -543,7 +561,7 @@ services: sequencer-db-1: image: postgres - user: postgres + user: root ports: - "$ESPRESSO_SEQUENCER1_DB_PORT:5432" environment: @@ -558,6 +576,23 @@ services: timeout: 4s retries: 20 + solver-db: + image: postgres + user: root + ports: + - "$MARKETPLACE_SOLVER_POSTGRES_PORT:5432" + environment: + - POSTGRES_PASSWORD=password + - POSTGRES_USER=root + - POSTGRES_DB=solver + healthcheck: + # Postgres can be falsely "ready" once before running init scripts. + # See https://github.com/docker-library/postgres/issues/146 for discussion. 
+ test: "pg_isready -U root && sleep 1 && pg_isready -U root" + interval: 5s + timeout: 4s + retries: 20 + block-explorer: image: ghcr.io/espressosystems/espresso-block-explorer:main ports: diff --git a/docker/marketplace-solver.Dockerfile b/docker/marketplace-solver.Dockerfile new file mode 100644 index 000000000..c244479ec --- /dev/null +++ b/docker/marketplace-solver.Dockerfile @@ -0,0 +1,19 @@ +FROM ubuntu:jammy + +ARG TARGETARCH + +RUN apt-get update \ + && apt-get install -y curl libcurl4 wait-for-it tini \ + && rm -rf /var/lib/apt/lists/* +ENTRYPOINT ["tini", "--"] + +COPY target/$TARGETARCH/release/marketplace-solver /bin/marketplace-solver +RUN chmod +x /bin/marketplace-solver + +ENV ESPRESSO_MARKETPLACE_SOLVER_API_PORT=25000 + +CMD ["/bin/marketplace-solver"] + + +HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_MARKETPLACE_SOLVER_API_PORT}/healthcheck || exit 1 +EXPOSE ${ESPRESSO_MARKETPLACE_SOLVER_API_PORT} \ No newline at end of file diff --git a/hotshot-state-prover/src/bin/state-prover.rs b/hotshot-state-prover/src/bin/state-prover.rs index 78d532926..39a122e67 100644 --- a/hotshot-state-prover/src/bin/state-prover.rs +++ b/hotshot-state-prover/src/bin/state-prover.rs @@ -1,6 +1,5 @@ use std::{str::FromStr as _, time::Duration}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use cld::ClDuration; use es_version::SEQUENCER_VERSION; @@ -11,6 +10,7 @@ use ethers::{ }; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_state_prover::service::{run_prover_once, run_prover_service, StateProverConfig}; +use sequencer_utils::logging; use snafu::Snafu; use url::Url; @@ -78,6 +78,9 @@ struct Args { /// Stake table capacity for the prover circuit #[clap(short, long, env = "ESPRESSO_SEQUENCER_STAKE_TABLE_CAPACITY", default_value_t = STAKE_TABLE_CAPACITY)] pub stake_table_capacity: usize, + + #[clap(flatten)] + logging: logging::Config, } #[derive(Clone, 
Debug, Snafu)] @@ -95,10 +98,8 @@ fn parse_duration(s: &str) -> Result { #[async_std::main] async fn main() { - setup_logging(); - setup_backtrace(); - let args = Args::parse(); + args.logging.init(); // prepare config for state prover from user options let provider = Provider::::try_from(args.l1_provider.to_string()).unwrap(); diff --git a/hotshot-state-prover/src/service.rs b/hotshot-state-prover/src/service.rs index 7b181c0d4..eaca2bd1d 100644 --- a/hotshot-state-prover/src/service.rs +++ b/hotshot-state-prover/src/service.rs @@ -517,7 +517,6 @@ mod test { use anyhow::Result; use ark_ed_on_bn254::EdwardsConfig; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use ethers::{ abi::AbiEncode, utils::{Anvil, AnvilInstance}, @@ -526,7 +525,7 @@ mod test { use hotshot_types::light_client::StateSignKey; use jf_signature::{schnorr::SchnorrSignatureScheme, SignatureScheme}; use jf_utils::test_rng; - use sequencer_utils::deployer; + use sequencer_utils::{deployer, test_utils::setup_test}; use super::*; use crate::mock_ledger::{MockLedger, MockSystemParam}; @@ -681,8 +680,7 @@ mod test { #[async_std::test] async fn test_read_contract_state() -> Result<()> { - setup_logging(); - setup_backtrace(); + setup_test(); let anvil = Anvil::new().spawn(); let dummy_genesis = ParsedLightClientState::dummy_genesis(); let (_wallet, contract) = deploy_contract_for_test(&anvil, dummy_genesis.clone()).await?; @@ -702,8 +700,7 @@ mod test { // This test is temporarily ignored. We are unifying the contract deployment in #1071. 
#[async_std::test] async fn test_submit_state_and_proof() -> Result<()> { - setup_logging(); - setup_backtrace(); + setup_test(); let (genesis, _qc_keys, state_keys, st) = init_ledger_for_test(); diff --git a/process-compose.yaml b/process-compose.yaml index 67796668b..2bbb184bf 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -448,6 +448,29 @@ processes: path: /healthcheck failure_threshold: 100 + marketplace-solver: + command: marketplace-solver + environment: + - ESPRESSO_MARKETPLACE_SOLVER_API_PORT=$ESPRESSO_MARKETPLACE_SOLVER_API_PORT + - ESPRESSO_SEQUENCER_HOTSHOT_EVENT_API_URL=http://localhost:$ESPRESSO_SEQUENCER_HOTSHOT_EVENT_STREAMING_API_PORT + - MARKETPLACE_SOLVER_POSTGRES_HOST=localhost + - MARKETPLACE_SOLVER_POSTGRES_USER=root + - MARKETPLACE_SOLVER_POSTGRES_PASSWORD=password + - MARKETPLACE_SOLVER_POSTGRES_DATABASE_NAME=solver + depends_on: + sequencer0: + condition: process_healthy + solver-db: + condition: process_healthy + + readiness_probe: + http_get: + scheme: http + host: localhost + port: $ESPRESSO_MARKETPLACE_SOLVER_API_PORT + path: /healthcheck + failure_threshold: 100 + sequencer-db-0: command: docker run -e POSTGRES_PASSWORD -e POSTGRES_USER -e POSTGRES_DB -p $ESPRESSO_SEQUENCER0_DB_PORT:5432 postgres environment: @@ -481,6 +504,23 @@ processes: # See https://github.com/docker-library/postgres/issues/146 for discussion. success_threshold: 2 failure_threshold: 20 + + solver-db: + command: docker run -e POSTGRES_PASSWORD -e POSTGRES_USER -e POSTGRES_DB -p $MARKETPLACE_SOLVER_POSTGRES_PORT:5432 postgres + environment: + - POSTGRES_PASSWORD=password + - POSTGRES_USER=root + - POSTGRES_DB=solver + readiness_probe: + exec: + command: pg_isready -h localhost -p $MARKETPLACE_SOLVER_POSTGRES_PORT + initial_delay_seconds: 5 + period_seconds: 5 + timeout_seconds: 4 + # Postgres can be falsely "ready" once before running init scripts. + # See https://github.com/docker-library/postgres/issues/146 for discussion. 
+ success_threshold: 2 + failure_threshold: 20 block-explorer: command: docker run --rm -p $ESPRESSO_BLOCK_EXPLORER_PORT:3000 -e QUERY_SERVICE_URI ghcr.io/espressosystems/espresso-block-explorer:main diff --git a/scripts/build-docker-images b/scripts/build-docker-images index d2c779bc1..072494efb 100755 --- a/scripts/build-docker-images +++ b/scripts/build-docker-images @@ -35,7 +35,7 @@ for ARCH in "amd64" "arm64"; do ;; esac mkdir -p ${WORKDIR}/target/$ARCH/release - for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequencer" "commitment-task" "submit-transactions" "reset-storage" "state-relay-server" "state-prover" "deploy" "keygen" "permissionless-builder" "nasty-client" "pub-key" "espresso-bridge" "espresso-dev-node"; do + for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequencer" "commitment-task" "submit-transactions" "reset-storage" "state-relay-server" "state-prover" "deploy" "keygen" "permissionless-builder" "nasty-client" "pub-key" "espresso-bridge" "espresso-dev-node" "marketplace-solver"; do cp -v "${CARGO_TARGET_DIR}/${TARGET}/release/$binary" ${WORKDIR}/target/$ARCH/release done done @@ -62,3 +62,4 @@ docker build -t ghcr.io/espressosystems/espresso-sequencer/builder:main -f docke docker build -t ghcr.io/espressosystems/espresso-sequencer/nasty-client:main -f docker/nasty-client.Dockerfile ${WORKDIR} docker build -t ghcr.io/espressosystems/espresso-sequencer/espresso-dev-node:main -f docker/espresso-dev-node.Dockerfile ${WORKDIR} docker build -t ghcr.io/espressosystems/espresso-sequencer/bridge:main -f docker/espresso-bridge.Dockerfile ${WORKDIR} +docker build -t ghcr.io/espressosystems/espresso-sequencer/marketplace-solver:main -f docker/marketplace-solver.Dockerfile ${WORKDIR} \ No newline at end of file diff --git a/scripts/build-docker-images-native b/scripts/build-docker-images-native index bb8d927d7..6037ce137 100755 --- a/scripts/build-docker-images-native +++ 
b/scripts/build-docker-images-native @@ -87,7 +87,7 @@ mkdir -p ${WORKDIR}/data cp -rv data/genesis ${WORKDIR}/data/ mkdir -p "${WORKDIR}/target/$ARCH/release" -for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequencer" "commitment-task" "submit-transactions" "reset-storage" "state-relay-server" "state-prover" "deploy" "keygen" "permissionless-builder" "nasty-client" "pub-key" "espresso-bridge" "espresso-dev-node"; do +for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequencer" "commitment-task" "submit-transactions" "reset-storage" "state-relay-server" "state-prover" "deploy" "keygen" "permissionless-builder" "nasty-client" "pub-key" "espresso-bridge" "espresso-dev-node" "marketplace-solver"; do cp -v "${CARGO_TARGET_DIR}/release/$binary" "${WORKDIR}/target/$ARCH/release" # Patch the interpreter for running without nix inside the ubuntu based docker image. if [ $KERNEL == "linux" ]; then @@ -117,3 +117,4 @@ docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/ docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/nasty-client:main -f docker/nasty-client.Dockerfile ${WORKDIR} docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/espresso-dev-node:main -f docker/espresso-dev-node.Dockerfile ${WORKDIR} docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/bridge:main -f docker/espresso-bridge.Dockerfile ${WORKDIR} +docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/marketplace-solver:main -f docker/marketplace-solver.Dockerfile ${WORKDIR} diff --git a/scripts/launch-dev-node-with-postgres b/scripts/launch-dev-node-with-postgres index 1aef1e556..97156754e 100644 --- a/scripts/launch-dev-node-with-postgres +++ b/scripts/launch-dev-node-with-postgres @@ -11,6 +11,9 @@ export POSTGRES_PASSWORD=$ESPRESSO_SEQUENCER_POSTGRES_PASSWORD export RUST_LOG=${RUST_LOG:-info} +# Trap SIGTERM and SIGINT 
signals and send them to the process group +trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT + # Start postgres in the background docker-entrypoint.sh postgres & @@ -22,4 +25,5 @@ until pg_isready && sleep 1 && pg_isready; do done # Start the dev node -espresso-dev-node +espresso-dev-node & +wait diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 732fce9d3..1a58df7ba 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -77,7 +77,9 @@ jf-rescue = { workspace = true } jf-signature = { workspace = true, features = ["bls", "schnorr"] } jf-vid = { workspace = true } -libp2p = { workspace = true } +libp2p = { workspace = true } +marketplace-solver = { git = "https://github.com/EspressoSystems/marketplace-solver.git", tag = "0.1.2" } +num-traits = "0.2.18" num_enum = "0.7" portpicker = { workspace = true } rand = { workspace = true } diff --git a/sequencer/api/espresso_dev_node.toml b/sequencer/api/espresso_dev_node.toml index 75ce8143f..62459439c 100644 --- a/sequencer/api/espresso_dev_node.toml +++ b/sequencer/api/espresso_dev_node.toml @@ -41,6 +41,6 @@ METHOD = "POST" DOC = """ Set the hotshot up in the light client contract. -This is intended to be used when `freeze` has been called previously. By unfreezing the L1 height, +This is intended to be used when `set-hotshot-down` has been called previously. By calling this, rollups will detect the reactivity of HotShot. 
""" diff --git a/sequencer/api/public-env-vars.toml b/sequencer/api/public-env-vars.toml index 85ab45d16..b214f1e2d 100644 --- a/sequencer/api/public-env-vars.toml +++ b/sequencer/api/public-env-vars.toml @@ -56,6 +56,7 @@ variables = [ "ESPRESSO_SEQUENCER_API_PEERS", "ESPRESSO_SEQUENCER_API_PORT", "ESPRESSO_SEQUENCER_ARCHIVE", + "ESPRESSO_SEQUENCER_BACKTRACE_MODE", "ESPRESSO_SEQUENCER_CATCHUP_BACKOFF_FACTOR", "ESPRESSO_SEQUENCER_CATCHUP_BACKOFF_JITTER", "ESPRESSO_SEQUENCER_CATCHUP_BASE_RETRY_DELAY", diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index df2abaf67..a1ccafaa2 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -8,7 +8,7 @@ use committable::Commitment; use data_source::{CatchupDataSource, SubmitDataSource}; use derivative::Derivative; use espresso_types::{ - v0::traits::SequencerPersistence, AccountQueryData, BlockMerkleTree, ChainConfig, + v0::traits::SequencerPersistence, v0_3::ChainConfig, AccountQueryData, BlockMerkleTree, FeeAccountProof, NodeState, PubKey, Transaction, }; use ethers::prelude::Address; @@ -358,7 +358,6 @@ impl, Ver: StaticVersionType + 'static, P: Sequencer pub mod test_helpers { use std::time::Duration; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::sleep; use committable::Committable; use es_version::{SequencerVersion, SEQUENCER_VERSION}; @@ -381,6 +380,7 @@ pub mod test_helpers { use itertools::izip; use jf_merkle_tree::{MerkleCommitment, MerkleTreeScheme}; use portpicker::pick_unused_port; + use sequencer_utils::test_utils::setup_test; use surf_disco::Client; use tide_disco::error::ServerError; @@ -608,8 +608,7 @@ pub mod test_helpers { /// to test a different initialization path) but should not remove or modify the existing /// functionality (e.g. removing the status module or changing the port). 
pub async fn status_test_helper(opt: impl FnOnce(Options) -> Options) { - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); let url = format!("http://localhost:{port}").parse().unwrap(); @@ -658,8 +657,7 @@ pub mod test_helpers { /// to test a different initialization path) but should not remove or modify the existing /// functionality (e.g. removing the submit module or changing the port). pub async fn submit_test_helper(opt: impl FnOnce(Options) -> Options) { - setup_logging(); - setup_backtrace(); + setup_test(); let txn = Transaction::new(NamespaceId::from(1_u32), vec![1, 2, 3, 4]); @@ -696,8 +694,7 @@ pub mod test_helpers { /// Test the state signature API. pub async fn state_signature_test_helper(opt: impl FnOnce(Options) -> Options) { - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); @@ -740,8 +737,7 @@ pub mod test_helpers { /// to test a different initialization path) but should not remove or modify the existing /// functionality (e.g. removing the catchup module or changing the port). 
pub async fn catchup_test_helper(opt: impl FnOnce(Options) -> Options) { - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); let url = format!("http://localhost:{port}").parse().unwrap(); @@ -832,7 +828,6 @@ pub mod test_helpers { #[cfg(test)] #[espresso_macros::generic_tests] mod api_tests { - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use committable::Committable; use data_source::testing::TestableSequencerDataSource; use endpoints::NamespaceProofQueryData; @@ -842,6 +837,7 @@ mod api_tests { use futures::stream::StreamExt; use hotshot_query_service::availability::{LeafQueryData, VidCommonQueryData}; use portpicker::pick_unused_port; + use sequencer_utils::test_utils::setup_test; use surf_disco::Client; use test_helpers::{ catchup_test_helper, state_signature_test_helper, status_test_helper, submit_test_helper, @@ -873,8 +869,7 @@ mod api_tests { #[async_std::test] pub(crate) async fn test_namespace_query() { - setup_logging(); - setup_backtrace(); + setup_test(); // Arbitrary transaction, arbitrary namespace ID let ns_id = NamespaceId::from(42_u32); @@ -979,9 +974,7 @@ mod api_tests { pub(crate) async fn test_hotshot_event_streaming() { use HotshotEvents; - setup_logging(); - - setup_backtrace(); + setup_test(); let hotshot_event_streaming_port = pick_unused_port().expect("No ports free for hotshot event streaming"); @@ -1037,7 +1030,6 @@ mod api_tests { mod test { use std::time::Duration; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::sleep; use committable::{Commitment, Committable}; use es_version::{SequencerVersion, SEQUENCER_VERSION}; @@ -1066,6 +1058,7 @@ mod test { }; use jf_merkle_tree::prelude::{MerkleProof, Sha3Node}; use portpicker::pick_unused_port; + use sequencer_utils::test_utils::setup_test; use surf_disco::Client; use test_helpers::{ catchup_test_helper, state_signature_test_helper, status_test_helper, 
submit_test_helper, @@ -1087,8 +1080,7 @@ mod test { #[async_std::test] async fn test_healthcheck() { - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); let url = format!("http://localhost:{port}").parse().unwrap(); @@ -1130,8 +1122,7 @@ mod test { #[async_std::test] async fn test_merklized_state_api() { - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); @@ -1204,8 +1195,7 @@ mod test { #[async_std::test] async fn test_catchup() { - setup_logging(); - setup_backtrace(); + setup_test(); // Start a sequencer network, using the query service for catchup. let port = pick_unused_port().expect("No ports free"); @@ -1304,8 +1294,7 @@ mod test { // This test uses a ValidatedState which only has the default chain config commitment. // The NodeState has the full chain config. // Both chain config commitments will match, so the ValidatedState should have the full chain config after a non-genesis block is decided. - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); let anvil = Anvil::new().spawn(); @@ -1361,8 +1350,7 @@ mod test { // However, for this test to work, at least one node should have a full chain config // to allow other nodes to catch up. - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); let anvil = Anvil::new().spawn(); @@ -1434,8 +1422,7 @@ mod test { #[async_std::test] async fn test_chain_config_upgrade() { - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); let anvil = Anvil::new().spawn(); @@ -1540,8 +1527,7 @@ mod test { #[async_std::test] pub(crate) async fn test_restart() { - setup_logging(); - setup_backtrace(); + setup_test(); const NUM_NODES: usize = 5; // Initialize nodes. 
@@ -1680,8 +1666,7 @@ mod test { #[async_std::test] async fn test_fetch_config() { - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); let url: surf_disco::Url = format!("http://localhost:{port}").parse().unwrap(); diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index 0755b63a4..051879521 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -5,7 +5,8 @@ use async_trait::async_trait; use committable::Commitment; use espresso_types::{ v0::traits::{PersistenceOptions, SequencerPersistence}, - ChainConfig, PubKey, Transaction, + v0_3::ChainConfig, + PubKey, Transaction, }; use ethers::prelude::Address; use futures::future::Future; diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index 7aee88ba5..409890352 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -1,7 +1,7 @@ use anyhow::{bail, Context}; use async_trait::async_trait; use committable::Commitment; -use espresso_types::{BlockMerkleTree, ChainConfig, FeeAccountProof, FeeMerkleTree}; +use espresso_types::{v0_3::ChainConfig, BlockMerkleTree, FeeAccountProof, FeeMerkleTree}; use ethers::prelude::Address; use futures::FutureExt; use hotshot_query_service::{ diff --git a/sequencer/src/bin/commitment-task.rs b/sequencer/src/bin/commitment-task.rs index 1e29526c0..d93c7abc4 100644 --- a/sequencer/src/bin/commitment-task.rs +++ b/sequencer/src/bin/commitment-task.rs @@ -1,6 +1,5 @@ use std::{io, time::Duration}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::spawn; use clap::Parser; use es_version::SEQUENCER_VERSION; @@ -10,6 +9,7 @@ use sequencer::{ hotshot_commitment::{run_hotshot_commitment_task, CommitmentTaskOptions}, options::parse_duration, }; +use sequencer_utils::logging; use tide_disco::{error::ServerError, Api}; use url::Url; use vbs::version::StaticVersionType; @@ -58,13 +58,14 @@ pub struct Options { 
/// If specified, sequencing attempts will be delayed by duration sampled from an exponential distribution with mean DELAY. #[clap(long, name = "DELAY", value_parser = parse_duration, env = "ESPRESSO_COMMITMENT_TASK_DELAY")] pub delay: Option, + + #[clap(flatten)] + logging: logging::Config, } #[async_std::main] async fn main() { - setup_logging(); - setup_backtrace(); - let opt = Options::parse(); + opt.logging.init(); if let Some(port) = opt.port { start_http_server(port, opt.hotshot_address, SEQUENCER_VERSION).unwrap(); @@ -110,17 +111,16 @@ fn start_http_server( #[cfg(test)] mod test { - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use es_version::{SequencerVersion, SEQUENCER_VERSION}; use portpicker::pick_unused_port; + use sequencer_utils::test_utils::setup_test; use surf_disco::Client; use super::{start_http_server, Address, ServerError}; #[async_std::test] async fn test_get_hotshot_contract() { - setup_logging(); - setup_backtrace(); + setup_test(); let port = pick_unused_port().expect("No ports free"); let expected_addr = "0xED15E1FE0789c524398137a066ceb2EF9884E5D8" diff --git a/sequencer/src/bin/count-transactions.rs b/sequencer/src/bin/count-transactions.rs deleted file mode 100644 index 6d028bf37..000000000 --- a/sequencer/src/bin/count-transactions.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! Utility program to count transactions sequenced by HotShot. - -use std::{cmp::max, collections::HashSet, time::Duration}; - -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use async_std::task::sleep; -use clap::Parser; -use committable::Committable; -use es_version::SequencerVersion; -use espresso_types::SeqTypes; -use futures::future::join_all; -use hotshot_query_service::availability::BlockQueryData; -use surf_disco::Url; - -/// Utility program to count transactions sequenced by HotShot. -#[derive(Clone, Debug, Parser)] -struct Options { - /// Start counting from block FROM. 
- #[clap(long, name = "FROM")] - from: Option, - - /// Stop counting at block TO. - #[clap(long, name = "TO")] - to: Option, - - /// Number of parallel tasks to run. - #[clap(short, long, default_value = "1")] - jobs: usize, - - /// URL of the HotShot query service. - url: Url, -} - -#[async_std::main] -async fn main() { - setup_logging(); - setup_backtrace(); - - let opt = Options::parse(); - let client = surf_disco::Client::::new(opt.url); - client.connect(None).await; - - let block_height: u64 = client - .get("status/latest_block_height") - .send() - .await - .unwrap(); - let from = opt.from.unwrap_or(0); - let to = max(opt.to.unwrap_or(block_height), from); - - let (totals, uniques): (Vec<_>, Vec<_>) = join_all((0..opt.jobs).map(|i| { - let client = client.clone(); - async move { - let num_blocks = (to - from) / (opt.jobs as u64); - let offset = i as u64 * num_blocks; - let from = from + offset; - let to = if i + 1 == opt.jobs { - to - } else { - from + num_blocks - }; - tracing::info!("task {i} counting blocks {from}-{to}"); - - let mut total = 0; - let mut unique = HashSet::new(); - for height in from..to { - tracing::info!("task {i} processing block {height}/{to}"); - let block: BlockQueryData = loop { - match client - .get(&format!("availability/block/{height}")) - .send() - .await - { - Ok(block) => break block, - Err(err) => { - tracing::error!("task {i} error fetching block {height}: {err}"); - - // Back off a bit and then retry. 
- sleep(Duration::from_millis(100)).await; - } - } - }; - for (_, txn) in block.enumerate() { - total += 1; - unique.insert(txn.commit()); - } - } - (total, unique.len()) - } - })) - .await - .into_iter() - .unzip(); - - let total: usize = totals.into_iter().sum(); - let unique: usize = uniques.into_iter().sum(); - - println!("For blocks {from}-{to}:"); - println!("Total transactions: {total}"); - println!("Unique: {unique}"); -} diff --git a/sequencer/src/bin/deploy.rs b/sequencer/src/bin/deploy.rs index 4f64ada92..5552ff5d8 100644 --- a/sequencer/src/bin/deploy.rs +++ b/sequencer/src/bin/deploy.rs @@ -1,11 +1,13 @@ use std::{fs::File, io::stdout, path::PathBuf}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use futures::FutureExt; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_state_prover::service::light_client_genesis; -use sequencer_utils::deployer::{deploy, ContractGroup, Contracts, DeployedContracts}; +use sequencer_utils::{ + deployer::{deploy, ContractGroup, Contracts, DeployedContracts}, + logging, +}; use url::Url; /// Deploy contracts needed to run the sequencer. 
@@ -84,14 +86,16 @@ struct Options { /// Stake table capacity for the prover circuit #[clap(short, long, env = "ESPRESSO_SEQUENCER_STAKE_TABLE_CAPACITY", default_value_t = STAKE_TABLE_CAPACITY)] pub stake_table_capacity: usize, + + #[clap(flatten)] + logging: logging::Config, } #[async_std::main] async fn main() -> anyhow::Result<()> { - setup_logging(); - setup_backtrace(); - let opt = Options::parse(); + opt.logging.init(); + let contracts = Contracts::from(opt.contracts); let sequencer_url = opt.sequencer_url.clone(); diff --git a/sequencer/src/bin/espresso-bridge.rs b/sequencer/src/bin/espresso-bridge.rs index 22546214b..2e8fe0367 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -1,9 +1,8 @@ use std::time::Duration; use anyhow::{bail, ensure, Context}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::{sync::Arc, task::sleep}; -use clap::Parser; +use clap::{Parser, Subcommand}; use contract_bindings::fee_contract::FeeContract; use es_version::SequencerVersion; use espresso_types::{eth_signature_key::EthKeyPair, FeeAccount, FeeAmount, FeeMerkleTree, Header}; @@ -17,6 +16,7 @@ use jf_merkle_tree::{ prelude::{MerkleProof, Sha3Node}, MerkleTreeScheme, }; +use sequencer_utils::logging; use surf_disco::{error::ClientError, Url}; type EspressoClient = surf_disco::Client; @@ -25,6 +25,15 @@ type FeeMerkleProof = MerkleProof anyhow::Result<()> { - setup_logging(); - setup_backtrace(); + let opt = Options::parse(); + opt.logging.init(); - match Command::parse() { + match opt.command { Command::Deposit(opt) => deposit(opt).await, Command::Balance(opt) => balance(opt).await, Command::L1Balance(opt) => l1_balance(opt).await, diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs index 8247588c1..b1cd46a39 100644 --- a/sequencer/src/bin/espresso-dev-node.rs +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -1,6 +1,5 @@ use std::{io, sync::Arc, 
time::Duration}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::spawn; use clap::Parser; use contract_bindings::light_client_mock::LightClientMock; @@ -28,7 +27,7 @@ use sequencer::{ }; use sequencer_utils::{ deployer::{deploy, Contract, Contracts}, - AnvilOptions, + logging, AnvilOptions, }; use serde::{Deserialize, Serialize}; use tide_disco::{error::ServerError, Api, Error as _, StatusCode}; @@ -85,14 +84,16 @@ struct Args { #[clap(flatten)] sql: persistence::sql::Options, + + #[clap(flatten)] + logging: logging::Config, } #[async_std::main] async fn main() -> anyhow::Result<()> { - setup_logging(); - setup_backtrace(); - let cli_params = Args::parse(); + cli_params.logging.init(); + let api_options = options::Options::from(options::Http { port: cli_params.sequencer_api_port, max_connections: cli_params.sequencer_api_max_connections, @@ -299,7 +300,6 @@ struct SetHotshotUpBody { mod tests { use std::{process::Child, sync::Arc, time::Duration}; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::{stream::StreamExt, task::sleep}; use committable::{Commitment, Committable}; use contract_bindings::light_client::LightClient; @@ -318,7 +318,7 @@ mod tests { use jf_merkle_tree::MerkleTreeScheme; use portpicker::pick_unused_port; use sequencer::api::endpoints::NamespaceProofQueryData; - use sequencer_utils::{init_signer, AnvilOptions}; + use sequencer_utils::{init_signer, test_utils::setup_test, AnvilOptions}; use surf_disco::Client; use tide_disco::error::ServerError; @@ -341,8 +341,7 @@ mod tests { // - Types (like `Header`) update #[async_std::test] async fn dev_node_test() { - setup_logging(); - setup_backtrace(); + setup_test(); let builder_port = pick_unused_port().unwrap(); diff --git a/sequencer/src/bin/keygen.rs b/sequencer/src/bin/keygen.rs index 0274db37e..79e6c3717 100644 --- a/sequencer/src/bin/keygen.rs +++ b/sequencer/src/bin/keygen.rs @@ -7,13 +7,13 @@ use std::{ }; 
use anyhow::anyhow; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::{Parser, ValueEnum}; use derive_more::Display; use ethers::utils::hex; use hotshot::types::SignatureKey; use hotshot_types::{light_client::StateKeyPair, signature_key::BLSPubKey}; use rand::{RngCore, SeedableRng}; +use sequencer_utils::logging; use tracing::info_span; #[derive(Clone, Copy, Debug, Display, Default, ValueEnum)] @@ -100,6 +100,9 @@ struct Options { /// called .seed. #[clap(short, long, name = "OUT")] out: PathBuf, + + #[clap(flatten)] + logging: logging::Config, } fn parse_seed(s: &str) -> Result<[u8; 32], anyhow::Error> { @@ -118,10 +121,8 @@ fn gen_default_seed() -> [u8; 32] { } fn main() -> anyhow::Result<()> { - setup_logging(); - setup_backtrace(); - let opts = Options::parse(); + opts.logging.init(); tracing::debug!( "Generating {} keypairs with scheme {}", diff --git a/sequencer/src/bin/marketplace-solver.rs b/sequencer/src/bin/marketplace-solver.rs new file mode 100644 index 000000000..5b9648d92 --- /dev/null +++ b/sequencer/src/bin/marketplace-solver.rs @@ -0,0 +1,90 @@ +use std::{str::FromStr, sync::Arc}; + +use anyhow::Context; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use async_std::{sync::RwLock, task::spawn}; +use clap::Parser; +use espresso_types::SeqTypes; +use hotshot_types::traits::node_implementation::NodeType; +use marketplace_solver::{ + define_api, handle_events, + state::{GlobalState, SolverState, StakeTable}, + DatabaseOptions, EventsServiceClient, SolverError, +}; +use tide_disco::App; +use url::Url; +use vbs::version::StaticVersionType; + +type Version = ::Base; + +#[derive(Parser)] +struct Args { + /// Port to run the server on. 
+ #[clap(short, long, env = "ESPRESSO_MARKETPLACE_SOLVER_API_PORT")] + solver_api_port: u16, + + /// Hotshot events service api URL + #[clap(short, long, env = "ESPRESSO_SEQUENCER_HOTSHOT_EVENT_API_URL")] + events_api_url: String, + + #[clap(flatten)] + database_options: DatabaseOptions, +} + +#[async_std::main] +async fn main() -> anyhow::Result<()> { + setup_logging(); + setup_backtrace(); + + let args = Args::parse(); + let Args { + solver_api_port, + events_api_url, + database_options, + } = args; + + let events_api_url = Url::from_str(&format!("{events_api_url}/hotshot-events"))?; + + let events_client = EventsServiceClient::new(events_api_url.clone()).await; + + let startup_info = events_client + .get_startup_info() + .await + .context("failed to get startup info ")?; + + let event_stream = events_client + .get_event_stream() + .await + .context("failed to get event stream")?; + + let database = database_options + .connect() + .await + .context("failed to connect to database")?; + + let solver_state = SolverState { + stake_table: StakeTable { + known_nodes_with_stake: startup_info.known_node_with_stake, + }, + bid_txs: Default::default(), + }; + + let global_state = Arc::new(RwLock::new(GlobalState::new(database, solver_state)?)); + + let event_handler = spawn(handle_events(event_stream, global_state.clone())); + + let mut app = App::<_, SolverError>::with_state(global_state); + + let mut api = define_api(Default::default())?; + api.with_version(env!("CARGO_PKG_VERSION").parse()?); + + app.register_module::("marketplace-solver", api)?; + + app.serve(format!("0.0.0.0:{}", solver_api_port), Version::instance()) + .await + .unwrap(); + + event_handler.cancel().await; + + Ok(()) +} diff --git a/sequencer/src/bin/nasty-client.rs b/sequencer/src/bin/nasty-client.rs index 1e7ab8642..26b15b98f 100644 --- a/sequencer/src/bin/nasty-client.rs +++ b/sequencer/src/bin/nasty-client.rs @@ -22,7 +22,6 @@ use std::{ }; use anyhow::{bail, ensure, Context}; -use 
async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::{ sync::RwLock, task::{sleep, spawn}, @@ -31,7 +30,7 @@ use clap::Parser; use committable::Committable; use derivative::Derivative; use es_version::{SequencerVersion, SEQUENCER_VERSION}; -use espresso_types::{BlockMerkleTree, FeeMerkleTree, Header, SeqTypes}; +use espresso_types::{v0_3::IterableFeeInfo, BlockMerkleTree, FeeMerkleTree, Header, SeqTypes}; use futures::{ future::{FutureExt, TryFuture, TryFutureExt}, stream::{Peekable, StreamExt}, @@ -47,6 +46,7 @@ use jf_merkle_tree::{ }; use rand::{seq::SliceRandom, RngCore}; use sequencer::{api::endpoints::NamespaceProofQueryData, options::parse_duration}; +use sequencer_utils::logging; use serde::de::DeserializeOwned; use strum::{EnumDiscriminants, VariantArray}; use surf_disco::{error::ClientError, socket, Error, StatusCode, Url}; @@ -76,6 +76,9 @@ struct Options { #[clap(flatten)] distribution: ActionDistribution, + + #[clap(flatten)] + logging: logging::Config, } #[derive(Clone, Copy, Debug, Parser)] @@ -929,7 +932,10 @@ impl ResourceManager
{ .await }) .await?; - let builder_address = builder_header.fee_info().account(); + + // Since we have multiple fee accounts, we need to select one. + let accounts = builder_header.fee_info().accounts(); + let builder_address = accounts.first().unwrap(); // Get the header of the state snapshot we're going to query so we can later verify our // results. @@ -1268,10 +1274,9 @@ async fn serve(port: u16, metrics: PrometheusMetrics) { #[async_std::main] async fn main() { - setup_logging(); - setup_backtrace(); - let opt = Options::parse(); + opt.logging.init(); + let metrics = PrometheusMetrics::default(); let total_actions = metrics.create_counter("total_actions".into(), None); let failed_actions = metrics.create_counter("failed_actions".into(), None); diff --git a/sequencer/src/bin/orchestrator.rs b/sequencer/src/bin/orchestrator.rs index 935c92437..fed39f64c 100644 --- a/sequencer/src/bin/orchestrator.rs +++ b/sequencer/src/bin/orchestrator.rs @@ -1,6 +1,5 @@ use std::{num::NonZeroUsize, time::Duration}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use derive_more::From; use espresso_types::PubKey; @@ -10,6 +9,7 @@ use hotshot_orchestrator::{ run_orchestrator, }; use sequencer::options::{parse_duration, Ratio}; +use sequencer_utils::logging; use snafu::Snafu; use url::Url; use vec1::Vec1; @@ -92,6 +92,9 @@ struct Args { value_parser = parse_duration )] builder_timeout: Duration, + + #[clap(flatten)] + logging: logging::Config, } #[derive(Debug, Snafu, From)] @@ -110,9 +113,9 @@ fn parse_seed(s: &str) -> Result<[u8; 32], ParseSeedError> { #[async_std::main] async fn main() { - setup_logging(); - setup_backtrace(); let args = Args::parse(); + args.logging.init(); + let mut config = NetworkConfig:: { start_delay_seconds: args.start_delay.as_secs(), manual_start_password: args.manual_start_password, diff --git a/sequencer/src/bin/reset-storage.rs b/sequencer/src/bin/reset-storage.rs index 0195635ad..3e34037d6 100644 --- 
a/sequencer/src/bin/reset-storage.rs +++ b/sequencer/src/bin/reset-storage.rs @@ -1,17 +1,26 @@ -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use clap::Parser; +use clap::{Parser, Subcommand}; use hotshot_query_service::data_source::VersionedDataSource; use sequencer::{ api::data_source::{DataSourceOptions, SequencerDataSource}, persistence, }; +use sequencer_utils::logging; /// Reset the persistent storage of a sequencer. /// /// This will remove all the persistent storage of a sequencer node, effectively resetting it to /// its genesis state. Do not run this program while the sequencer is running. #[derive(Clone, Debug, Parser)] -enum Options { +struct Options { + #[clap(flatten)] + logging: logging::Config, + + #[command(subcommand)] + command: Command, +} + +#[derive(Clone, Debug, Subcommand)] +enum Command { /// Reset file system storage. Fs(persistence::fs::Options), /// Reset SQL storage. @@ -20,16 +29,15 @@ enum Options { #[async_std::main] async fn main() -> anyhow::Result<()> { - setup_logging(); - setup_backtrace(); - let opt = Options::parse(); - match opt { - Options::Fs(opt) => { + opt.logging.init(); + + match opt.command { + Command::Fs(opt) => { tracing::warn!("resetting file system storage {opt:?}"); reset_storage(opt).await } - Options::Sql(opt) => { + Command::Sql(opt) => { tracing::warn!("resetting SQL storage {opt:?}"); reset_storage(*opt).await } diff --git a/sequencer/src/bin/state-relay-server.rs b/sequencer/src/bin/state-relay-server.rs index 5b77ec490..615e668f9 100644 --- a/sequencer/src/bin/state-relay-server.rs +++ b/sequencer/src/bin/state-relay-server.rs @@ -1,9 +1,9 @@ -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use es_version::SEQUENCER_VERSION; use ethers::types::U256; use hotshot_state_prover::service::one_honest_threshold; use sequencer::state_signature::relay_server::run_relay_server; +use sequencer_utils::logging; #[derive(Parser)] struct Args { @@ 
-25,14 +25,16 @@ struct Args { default_value = "5" )] total_stake: u64, + + #[clap(flatten)] + logging: logging::Config, } #[async_std::main] async fn main() { - setup_logging(); - setup_backtrace(); - let args = Args::parse(); + args.logging.init(); + let threshold = one_honest_threshold(U256::from(args.total_stake)); tracing::info!( diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 60b728011..183c5fe3d 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -5,7 +5,6 @@ use std::{ time::{Duration, Instant}, }; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::{sleep, spawn}; use clap::Parser; use committable::{Commitment, Committable}; @@ -21,6 +20,7 @@ use rand::{Rng, RngCore, SeedableRng}; use rand_chacha::ChaChaRng; use rand_distr::Distribution; use sequencer::options::{parse_duration, parse_size}; +use sequencer_utils::logging; use surf_disco::{Client, Url}; use tide_disco::{error::ServerError, App}; use vbs::version::StaticVersionType; @@ -145,6 +145,9 @@ struct Options { #[cfg(feature = "benchmarking")] #[clap(short, long, env = "ESPRESSO_BENCH_END_BLOCK")] benchmark_end_block: NonZeroUsize, + + #[clap(flatten)] + logging: logging::Config, } impl Options { @@ -160,10 +163,9 @@ impl Options { #[async_std::main] async fn main() { - setup_backtrace(); - setup_logging(); - let opt = Options::parse(); + opt.logging.init(); + tracing::warn!("starting load generator for sequencer {}", opt.url); let (sender, mut receiver) = mpsc::channel(opt.channel_bound); diff --git a/sequencer/src/bin/verify-headers.rs b/sequencer/src/bin/verify-headers.rs index d0a8a7aa2..fbaaaba41 100644 --- a/sequencer/src/bin/verify-headers.rs +++ b/sequencer/src/bin/verify-headers.rs @@ -2,13 +2,13 @@ use std::{cmp::max, process::exit, time::Duration}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use 
async_std::{sync::Arc, task::sleep}; use clap::Parser; use espresso_types::{Header, L1BlockInfo}; use ethers::prelude::*; use futures::future::join_all; use itertools::Itertools; +use sequencer_utils::logging; use surf_disco::Url; use vbs::version::StaticVersionType; @@ -48,6 +48,9 @@ struct Options { /// URL of the HotShot query service. url: Url, + + #[clap(flatten)] + logging: logging::Config, } type SequencerClient = surf_disco::Client; @@ -163,10 +166,9 @@ async fn get_l1_block(l1: &Provider, height: u64) -> L1BlockInfo { #[async_std::main] async fn main() { - setup_logging(); - setup_backtrace(); - let opt = Arc::new(Options::parse()); + opt.logging.init(); + let seq = Arc::new(SequencerClient::::new( opt.url.clone(), )); diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index 9c08bafd3..035b1809f 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -6,7 +6,8 @@ use async_trait::async_trait; use committable::Commitment; use espresso_types::{ v0::traits::{PersistenceOptions, StateCatchup}, - AccountQueryData, BackoffParams, BlockMerkleTree, ChainConfig, FeeAccount, FeeMerkleCommitment, + v0_3::ChainConfig, + AccountQueryData, BackoffParams, BlockMerkleTree, FeeAccount, FeeMerkleCommitment, }; use futures::future::FutureExt; use hotshot_orchestrator::config::NetworkConfig; diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 8efc6db62..c0983d836 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -25,7 +25,11 @@ use hotshot_query_service::Leaf; use hotshot_types::{ consensus::ConsensusMetricsValue, data::ViewNumber, - traits::{election::Membership, metrics::Metrics, network::ConnectedNetwork}, + traits::{ + election::Membership, + metrics::Metrics, + network::{ConnectedNetwork, Topic}, + }, }; use url::Url; use vbs::version::StaticVersionType; @@ -98,12 +102,14 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp let committee_membership = GeneralStaticCommittee::create_election( 
config.known_nodes_with_stake.clone(), config.known_nodes_with_stake.clone(), + Topic::Global, 0, ); let da_membership = GeneralStaticCommittee::create_election( config.known_nodes_with_stake.clone(), config.known_da_nodes.clone(), + Topic::Da, 0, ); diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 703f75215..fc5440cc1 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -5,7 +5,7 @@ use std::{ use anyhow::Context; use espresso_types::{ - ChainConfig, FeeAccount, FeeAmount, GenesisHeader, L1BlockInfo, Upgrade, UpgradeType, + v0_3::ChainConfig, FeeAccount, FeeAmount, GenesisHeader, L1BlockInfo, Upgrade, UpgradeType, }; use serde::{Deserialize, Serialize}; use vbs::version::Version; @@ -265,7 +265,8 @@ mod test { max_block_size: 30000.into(), base_fee: 1.into(), fee_recipient: FeeAccount::default(), - fee_contract: Some(Address::default()) + fee_contract: Some(Address::default()), + bid_recipient: None } ); assert_eq!( @@ -329,6 +330,7 @@ mod test { max_block_size: 30000.into(), base_fee: 1.into(), fee_recipient: FeeAccount::default(), + bid_recipient: None, fee_contract: None, } ); diff --git a/sequencer/src/hotshot_commitment.rs b/sequencer/src/hotshot_commitment.rs index 46efa54ac..3b220e02a 100644 --- a/sequencer/src/hotshot_commitment.rs +++ b/sequencer/src/hotshot_commitment.rs @@ -275,7 +275,6 @@ fn build_sequence_batches_txn( #[cfg(test)] mod test { - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::spawn; use committable::Committable; use contract_bindings::hot_shot::{NewBlocksCall, NewBlocksFilter}; @@ -283,7 +282,10 @@ mod test { use ethers::{abi::AbiDecode, providers::Middleware}; use futures::FutureExt; use hotshot_types::simple_certificate::QuorumCertificate; - use sequencer_utils::{test_utils::TestL1System, AnvilOptions}; + use sequencer_utils::{ + test_utils::{setup_test, TestL1System}, + AnvilOptions, + }; use surf_disco::{Error, StatusCode}; use super::*; @@ 
-356,8 +358,7 @@ mod test { #[async_std::test] async fn test_sequencer_task() { - setup_logging(); - setup_backtrace(); + setup_test(); let anvil = AnvilOptions::default().spawn().await; @@ -431,8 +432,7 @@ mod test { #[async_std::test] async fn test_idempotency() { - setup_logging(); - setup_backtrace(); + setup_test(); let anvil = AnvilOptions::default().spawn().await; @@ -496,8 +496,7 @@ mod test { #[async_std::test] async fn test_error_handling() { - setup_logging(); - setup_backtrace(); + setup_test(); let anvil = AnvilOptions::default().spawn().await; diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 1793ee6eb..1e78c9c8b 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -38,7 +38,7 @@ pub use genesis::Genesis; use hotshot::traits::implementations::{CombinedNetworks, Libp2pNetwork}; use hotshot::{ traits::implementations::{ - derive_libp2p_peer_id, CdnMetricsValue, KeyPair, MemoryNetwork, PushCdnNetwork, Topic, + derive_libp2p_peer_id, CdnMetricsValue, CdnTopic, KeyPair, MemoryNetwork, PushCdnNetwork, WrappedSignatureKey, }, types::SignatureKey, @@ -53,7 +53,7 @@ use hotshot_types::{ signature_key::{BLSPrivKey, BLSPubKey}, traits::{ metrics::Metrics, - network::ConnectedNetwork, + network::{ConnectedNetwork, Topic}, node_implementation::{NodeImplementation, NodeType}, signature_key::{BuilderSignatureKey, StakeTableEntryType}, }, @@ -249,9 +249,9 @@ pub async fn init_node( // If we are a DA node, we need to subscribe to the DA topic let topics = { - let mut topics = vec![Topic::Global]; + let mut topics = vec![CdnTopic::Global]; if is_da { - topics.push(Topic::Da); + topics.push(CdnTopic::Da); } topics }; @@ -646,17 +646,25 @@ pub mod testing { ) -> SequencerContext { let mut config = self.config.clone(); let my_peer_config = &config.known_nodes_with_stake[i]; + let is_da = config.known_da_nodes.contains(my_peer_config); config.my_own_validator_config = ValidatorConfig { public_key: my_peer_config.stake_table_entry.stake_key, 
private_key: self.priv_keys[i].clone(), stake_value: my_peer_config.stake_table_entry.stake_amount.as_u64(), state_key_pair: self.state_key_pairs[i].clone(), - is_da: config.known_da_nodes.contains(my_peer_config), + is_da, + }; + + let topics = if is_da { + vec![Topic::Global, Topic::Da] + } else { + vec![Topic::Global] }; let network = Arc::new(MemoryNetwork::new( - config.my_own_validator_config.public_key, + &config.my_own_validator_config.public_key, &self.master_map, + &topics, None, )); @@ -740,8 +748,6 @@ pub mod testing { #[cfg(test)] mod test { - - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use es_version::SequencerVersion; use espresso_types::{Header, NamespaceId, Payload, Transaction}; use futures::StreamExt; @@ -752,7 +758,7 @@ mod test { vid_commitment, BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES, }, }; - use sequencer_utils::AnvilOptions; + use sequencer_utils::{test_utils::setup_test, AnvilOptions}; use testing::{wait_for_decide_on_handle, TestConfigBuilder}; use self::testing::run_test_builder; @@ -760,8 +766,7 @@ mod test { #[async_std::test] async fn test_skeleton_instantiation() { - setup_logging(); - setup_backtrace(); + setup_test(); let ver = SequencerVersion::instance(); // Assign `config` so it isn't dropped early. 
let anvil = AnvilOptions::default().spawn().await; @@ -801,8 +806,7 @@ mod test { #[async_std::test] async fn test_header_invariants() { - setup_logging(); - setup_backtrace(); + setup_test(); let success_height = 30; let ver = SequencerVersion::instance(); diff --git a/sequencer/src/main.rs b/sequencer/src/main.rs index 1142e5c58..caf3be304 100644 --- a/sequencer/src/main.rs +++ b/sequencer/src/main.rs @@ -1,6 +1,5 @@ use std::net::ToSocketAddrs; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use es_version::SEQUENCER_VERSION; use futures::future::FutureExt; @@ -15,13 +14,11 @@ use vbs::version::StaticVersionType; #[async_std::main] async fn main() -> anyhow::Result<()> { - setup_logging(); - setup_backtrace(); - - tracing::warn!("sequencer starting up"); let opt = Options::parse(); + opt.logging.init(); + let mut modules = opt.modules(); - tracing::warn!("modules: {:?}", modules); + tracing::warn!(?modules, "sequencer starting up"); if let Some(storage) = modules.storage_fs.take() { init_with_storage(modules, opt, storage, SEQUENCER_VERSION).await @@ -176,6 +173,7 @@ mod test { genesis::StakeTableConfig, persistence::fs, }; + use sequencer_utils::test_utils::setup_test; use surf_disco::{error::ClientError, Client, Url}; use tempfile::TempDir; @@ -183,8 +181,7 @@ mod test { #[async_std::test] async fn test_startup_before_orchestrator() { - setup_logging(); - setup_backtrace(); + setup_test(); let (pub_key, priv_key) = PubKey::generated_from_seed_indexed([0; 32], 0); let state_key = StateKeyPair::generate_from_seed_indexed([0; 32], 0); diff --git a/sequencer/src/message_compat_tests.rs b/sequencer/src/message_compat_tests.rs index a5ddc469c..1ddfe19a7 100644 --- a/sequencer/src/message_compat_tests.rs +++ b/sequencer/src/message_compat_tests.rs @@ -54,10 +54,17 @@ type Serializer = vbs::Serializer; #[cfg(feature = "testing")] async fn test_message_compat() { use espresso_types::{Payload, SeqTypes, Transaction}; + use 
hotshot_types::traits::network::Topic; let (sender, priv_key) = PubKey::generated_from_seed_indexed(Default::default(), 0); let signature = PubKey::sign(&priv_key, &[]).unwrap(); - let membership = GeneralStaticCommittee::new(&[], vec![sender.stake_table_entry(1)], vec![], 0); + let membership = GeneralStaticCommittee::new( + &[], + vec![sender.stake_table_entry(1)], + vec![], + 0, + Topic::Global, + ); let upgrade_data = UpgradeProposalData { old_version: Version { major: 0, minor: 1 }, new_version: Version { major: 1, minor: 0 }, diff --git a/sequencer/src/network/cdn.rs b/sequencer/src/network/cdn.rs index a3e6b7074..b3b7a667b 100644 --- a/sequencer/src/network/cdn.rs +++ b/sequencer/src/network/cdn.rs @@ -8,8 +8,11 @@ use cdn_broker::reexports::{ def::{ConnectionDef, RunDef, Topic as TopicTrait}, discovery::{Embedded, Redis}, }; -use hotshot::{traits::implementations::Topic as HotShotTopic, types::SignatureKey}; -use hotshot_types::{traits::node_implementation::NodeType, utils::bincode_opts}; +use hotshot::types::SignatureKey; +use hotshot_types::{ + traits::{network::Topic as HotShotTopic, node_implementation::NodeType}, + utils::bincode_opts, +}; use num_enum::{IntoPrimitive, TryFromPrimitive}; use static_assertions::const_assert_eq; diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs index d8384bacb..21c3316cc 100644 --- a/sequencer/src/options.rs +++ b/sequencer/src/options.rs @@ -1,4 +1,5 @@ use core::fmt::Display; +use sequencer_utils::logging; use std::{ cmp::Ordering, collections::{HashMap, HashSet}, @@ -201,6 +202,9 @@ pub struct Options { /// Exponential backoff for fetching missing state from peers. 
#[clap(flatten)] pub catchup_backoff: BackoffParams, + + #[clap(flatten)] + pub logging: logging::Config, } impl Options { diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 0ba888e91..6611e2e75 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -10,7 +10,7 @@ use async_trait::async_trait; use committable::Commitment; -use espresso_types::ChainConfig; +use espresso_types::v0_3::ChainConfig; pub mod fs; pub mod no_storage; @@ -44,10 +44,8 @@ mod testing { #[cfg(test)] #[espresso_macros::generic_tests] mod persistence_tests { - use std::collections::BTreeMap; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use committable::Committable; use espresso_types::{Leaf, NodeState, PubKey, SeqTypes, ValidatedState}; use hotshot::types::{BLSPubKey, SignatureKey}; @@ -60,14 +58,14 @@ mod persistence_tests { vid::vid_scheme, }; use jf_vid::VidScheme; + use sequencer_utils::test_utils::setup_test; use testing::TestablePersistence; use super::*; #[async_std::test] pub async fn test_anchor_leaf() { - setup_logging(); - setup_backtrace(); + setup_test(); let tmp = P::tmp_storage().await; let mut storage = P::connect(&tmp).await; @@ -106,8 +104,7 @@ mod persistence_tests { #[async_std::test] pub async fn test_voted_view() { - setup_logging(); - setup_backtrace(); + setup_test(); let tmp = P::tmp_storage().await; let mut storage = P::connect(&tmp).await; @@ -150,8 +147,7 @@ mod persistence_tests { #[async_std::test] pub async fn test_append_and_collect_garbage() { - setup_logging(); - setup_backtrace(); + setup_test(); let tmp = P::tmp_storage().await; let mut storage = P::connect(&tmp).await; diff --git a/sequencer/src/state.rs b/sequencer/src/state.rs index c2f37045c..6044dce85 100644 --- a/sequencer/src/state.rs +++ b/sequencer/src/state.rs @@ -4,7 +4,7 @@ use std::{sync::Arc, time::Duration}; use anyhow::{bail, ensure, Context}; use async_std::{stream::StreamExt, sync::RwLock}; use 
espresso_types::{ - BlockMerkleTree, ChainConfig, Delta, FeeAccount, FeeMerkleTree, ValidatedState, + v0_3::ChainConfig, BlockMerkleTree, Delta, FeeAccount, FeeMerkleTree, ValidatedState, }; use futures::future::Future; use hotshot::traits::ValidatedState as HotShotState; @@ -289,10 +289,9 @@ impl SequencerStateDataSource for T where #[cfg(test)] mod test { - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use espresso_types::{ - validate_proposal, BlockSize, FeeAccount, FeeAccountProof, FeeAmount, FeeError, FeeInfo, - FeeMerkleProof, Leaf, ProposalValidationError, + v0_3::IterableFeeInfo, validate_proposal, BlockSize, FeeAccount, FeeAccountProof, + FeeAmount, FeeError, FeeInfo, FeeMerkleProof, Leaf, ProposalValidationError, }; use ethers::{abi::Address, types::U256}; use hotshot_types::{ @@ -301,14 +300,13 @@ mod test { }; use jf_merkle_tree::{ForgetableMerkleTreeScheme, MerkleTreeError}; use jf_vid::VidScheme; - use sequencer_utils::ser::FromStringOrInteger; + use sequencer_utils::{ser::FromStringOrInteger, test_utils::setup_test}; use super::*; #[test] fn test_fee_proofs() { - setup_logging(); - setup_backtrace(); + setup_test(); let mut tree = ValidatedState::default().fee_merkle_tree; let account1 = Address::random(); @@ -346,8 +344,7 @@ mod test { #[async_std::test] async fn test_validation_max_block_size() { - setup_logging(); - setup_backtrace(); + setup_test(); const MAX_BLOCK_SIZE: usize = 10; let payload = [0; 2 * MAX_BLOCK_SIZE]; @@ -381,8 +378,7 @@ mod test { #[async_std::test] async fn test_validation_base_fee() { - setup_logging(); - setup_backtrace(); + setup_test(); let max_block_size = 10; let payload = [0; 1]; @@ -406,7 +402,7 @@ mod test { ProposalValidationError::InsufficientFee { max_block_size: instance.chain_config.max_block_size, base_fee: instance.chain_config.base_fee, - proposed_fee: header.fee_info().amount() + proposed_fee: header.fee_info().amount().unwrap() }, err ); @@ -414,8 +410,7 @@ mod test { #[test] fn 
test_charge_fee() { - setup_logging(); - setup_backtrace(); + setup_test(); let src = FeeAccount::generated_from_seed_indexed([0; 32], 0).0; let dst = FeeAccount::generated_from_seed_indexed([0; 32], 1).0; diff --git a/types/Cargo.toml b/types/Cargo.toml index 6bee347a9..0ac8d9131 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -46,8 +46,10 @@ serde_json = { workspace = true } sha2 = "0.10" # TODO temporary, used only for VID, should be set in hotshot snafu = { workspace = true } static_assertions = { workspace = true } +surf-disco = { workspace = true } tagged-base64 = { workspace = true } thiserror = { workspace = true } +tide-disco = { workspace = true } time = { workspace = true } tracing = { workspace = true } url = { workspace = true } diff --git a/types/README.md b/types/README.md index b3bf5016b..502bc9da2 100644 --- a/types/README.md +++ b/types/README.md @@ -1,97 +1,87 @@ # Espresso Types -This crate provides the data types that make up the Espresso Sequencing Marketplace, along with -the logic that defines how these types interact and evolve as the network operates. It also provides -a versioning system that enables network upgrades, including changes to data types, that maintains -adequate compatibility with older types so that newer versions of the software are able to interpret -an Espresso blockchain all the way back to genesis. +This crate provides the data types that make up the Espresso Sequencing Marketplace, along with the logic that defines +how these types interact and evolve as the network operates. It also provides a versioning system that enables network +upgrades, including changes to data types, that maintains adequate compatibility with older types so that newer versions +of the software are able to interpret an Espresso blockchain all the way back to genesis. 
## Design Principles ### Compatibility Within Reason -Blockchains have the unique problem of needing to maintain backwards compatibility with every -previous version of the protocol, so that old data can be interpreted and replayed as necessary to -derive the current blockchain state. Thus, it is highly desirable to have one set of types at any -given time, which is backwards compatible with all older shipped versions, and to minimize -differences between versions so that we can avoid as much as possible enum types and conditional -logic to handle different versions. - -To the extent that differences between versions are minimized, it is practical to maintain one -codebase with full backwards compatibility, with only minor conditionals limited in scope. Due to -this strong compatibility, changes made in this manner -- that is, affecting some logic but -maintaining one coherent set of types and backwards serialization compatibility -- correspond to -minor version changes. - -Over time, it is possible that these minor changes will accumulate to the point where it is -infeasible to handle all the various cases in one set of code. Or, a significant protocol upgrade -might make it impractical to maintain backwards compatibility using a single set of types and logic. -In this case, a _major_ version increment may be necessary, where we create a new set of types and -logic with a fresh slate and no backwards compatibility burden. In such cases, applications that use -this crate (e.g. consensus, archival query service) will be responsible for switching between two -sets of types (e.g. major versions 1 and 2) as necessary, depending on what part of the history of -the blockchain they are dealing with. +Blockchains have the unique problem of needing to maintain backwards compatibility with every previous version of the +protocol, so that old data can be interpreted and replayed as necessary to derive the current blockchain state. 
Thus, it +is highly desirable to have one set of types at any given time, which is backwards compatible with all older shipped +versions, and to minimize differences between versions so that we can avoid as much as possible enum types and +conditional logic to handle different versions. + +To the extent that differences between versions are minimized, it is practical to maintain one codebase with full +backwards compatibility, with only minor conditionals limited in scope. Due to this strong compatibility, changes made +in this manner -- that is, affecting some logic but maintaining one coherent set of types and backwards serialization +compatibility -- correspond to minor version changes. + +Over time, it is possible that these minor changes will accumulate to the point where it is infeasible to handle all the +various cases in one set of code. Or, a significant protocol upgrade might make it impractical to maintain backwards +compatibility using a single set of types and logic. In this case, a _major_ version increment may be necessary, where +we create a new set of types and logic with a fresh slate and no backwards compatibility burden. In such cases, +applications that use this crate (e.g. consensus, archival query service) will be responsible for switching between two +sets of types (e.g. major versions 1 and 2) as necessary, depending on what part of the history of the blockchain they +are dealing with. ### Separation of Data from Code -Due to the constraints of serialization, and specifically the desirability of maintaining `serde` -compatibility as much as possible, the most practical way to handle different versions of data is to -have independent, parallel definitions of the data types for each supported version. These -definitions exist in their own namespaces within this crate, such as `v0_1::Header` for the `Header` -type from version 0.1, `v0_2::Header`, etc. - -Code, on the other hand, benefits from being as unified as possible. 
Having entirely separate -implementations for each version would make it harder to spot differences and similarities between -versions visually, increase the burden of maintenance and testing, and lead to large amounts of -duplicate code where logic hasn't changed between versions (or else a confusing mess of slightly -customizable helper functions shared across versions). - -As such, for each _major_ version, there is one implementation of the network logic that encompasses -all minor versions. Each major version defines top-level types like `v0::Header` which are -compatible across that entire major version. For example, `v0::Header` implements -`From` and `From`. Its serialization will output the appropriate minor -version format depending on which minor version was used to construct the header, and it implements +Due to the constraints of serialization, and specifically the desirability of maintaining `serde` compatibility as much +as possible, the most practical way to handle different versions of data is to have independent, parallel definitions of +the data types for each supported version. These definitions exist in their own namespaces within this crate, such as +`v0_1::Header` for the `Header` type from version 0.1, `v0_2::Header`, etc. + +Code, on the other hand, benefits from being as unified as possible. Having entirely separate implementations for each +version would make it harder to spot differences and similarities between versions visually, increase the burden of +maintenance and testing, and lead to large amounts of duplicate code where logic hasn't changed between versions (or +else a confusing mess of slightly customizable helper functions shared across versions). + +As such, for each _major_ version, there is one implementation of the network logic that encompasses all minor versions. +Each major version defines top-level types like `v0::Header` which are compatible across that entire major version. 
For +example, `v0::Header` implements `From` and `From`. Its serialization will output the +appropriate minor version format depending on which minor version was used to construct the header, and it implements `deserialize_as(Version)` which interprets the input as the specified format version. -This major version compatibility header implements all of the network logic for all minor versions -within its major version; operations on headers and states will follow the logic for the minor -version which was used to construct the header. +This major version compatibility header implements all of the network logic for all minor versions within its major +version; operations on headers and states will follow the logic for the minor version which was used to construct the +header. ## Repository Structure -The repository is divided into top-level modules for each supported major version. All types from -the most recent major version are also re-exported from the top level of the crate. This allows -applications which intend to stay up-to-date with the latest types to import directly from the top -level, and then a simple `cargo update` is sufficient to bring in the latest types, at which point -the application can be updated as necessary. Meanwhile, applications that intend to pin to a -specific stable major version can import the versioned types from the appropriate module. +The repository is divided into top-level modules for each supported major version. All types from the most recent major +version are also re-exported from the top level of the crate. This allows applications which intend to stay up-to-date +with the latest types to import directly from the top level, and then a simple `cargo update` is sufficient to bring in +the latest types, at which point the application can be updated as necessary. Meanwhile, applications that intend to pin +to a specific stable major version can import the versioned types from the appropriate module. 
-The structure of each major version module mirrors the top level structure recursively. There are -sub-modules for each minor version within that major version, and the latest types for that major -version are reexported from the major version module itself. +The structure of each major version module mirrors the top level structure recursively. There are sub-modules for each +minor version within that major version, and the latest types for that major version are reexported from the major +version module itself. -Note that the minor version sub-modules _only_ define data structures and derivable trait -implementations (such as `Debug` and `serde` traits). All operations on these data structures, -including constructors and field accessors, are defined in the major version module. This upholds -design principle 2, by separating the versioned data structure layouts from the version-agnostic -Rust interfaces we use to deal with these data structures. +Note that the minor version sub-modules _only_ define data structures and derivable trait implementations (such as +`Debug` and `serde` traits). All operations on these data structures, including constructors and field accessors, are +defined in the major version module. This upholds design principle 2, by separating the versioned data structure layouts +from the version-agnostic Rust interfaces we use to deal with these data structures. -Each major version module also contains a `traits` submodule containing implementations of HotShot -traits for the types for that major version, allowing them to be used to instantiate HotShot -consensus and related applications, like the query service. +Each major version module also contains a `traits` submodule containing implementations of HotShot traits for the types +for that major version, allowing them to be used to instantiate HotShot consensus and related applications, like the +query service. 
## Conventions and Best Practices ### Use re-exports to minimize duplicated data structures -Data structures that have not changed from one minor version to the next can be re-exported from the -previous minor version. E.g. in `v0::v0_2`, we might have `pub use super::v0_1::ChainConfig` if the -`ChainConfig` type has not changed between these two versions. +Data structures that have not changed from one minor version to the next can be re-exported from the previous minor +version. E.g. in `v0::v0_2`, we might have `pub use super::v0_1::ChainConfig` if the `ChainConfig` type has not changed +between these two versions. -Data structures that have not changed across any minor version within a major version can be -re-exported in the major version module from the latest minor version, but a static assertion must -be present checking that the re-exported type is the same type as exported from each of the minor -version modules, e.g. +Data structures that have not changed across any minor version within a major version can be re-exported in the major +version module from the latest minor version, but a static assertion must be present checking that the re-exported type +is the same type as exported from each of the minor version modules, e.g. ```rust pub use v0_2::ChainConfig; @@ -101,20 +91,18 @@ static_assert_unchanged_type!(ChainConfig); ### All fields are private -The goal of each major version is to provide a consistent Rust interface that works regardless of -which minor version is being used for the underlying data structure. To achieve this while allowing -changes in the data layout, all fields should be private (or `pub(crate)`). All consumers of this -crate should access the data via public methods defined in the major version module, since the -implementation of these methods can often be changed without changing the interface in case the -data layout changes. 
+The goal of each major version is to provide a consistent Rust interface that works regardless of which minor version is +being used for the underlying data structure. To achieve this while allowing changes in the data layout, all fields +should be private (or `pub(crate)`). All consumers of this crate should access the data via public methods defined in +the major version module, since the implementation of these methods can often be changed without changing the interface +in case the data layout changes. ### Unversioned types considered code -The pain of maintaining parallel sets of versioned types means we should only do it when absolutely -necessary: for serializable types that are used either as consensus messages or persistent storage, -or for types used to define such types. +The pain of maintaining parallel sets of versioned types means we should only do it when absolutely necessary: for +serializable types that are used either as consensus messages or persistent storage, or for types used to define such +types. -Other types which are used only as part of the Rust API, as transient, in-memory types, should be -defined alongside implementations and treated as part of code, not data. An example is the -`EthKeyPair` type, which is only used as a convenient wrapper to hold a public and private key pair, -but does not appear as part of any serialized data structure. +Other types which are used only as part of the Rust API, as transient, in-memory types, should be defined alongside +implementations and treated as part of code, not data. An example is the `EthKeyPair` type, which is only used as a +convenient wrapper to hold a public and private key pair, but does not appear as part of any serialized data structure. 
diff --git a/types/src/reference_tests.rs b/types/src/reference_tests.rs old mode 100644 new mode 100755 index 5f904e7b1..238513ac2 --- a/types/src/reference_tests.rs +++ b/types/src/reference_tests.rs @@ -23,9 +23,7 @@ use std::{fmt::Debug, path::Path, str::FromStr}; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use committable::Committable; -use es_version::SequencerVersion; use hotshot_query_service::availability::QueryablePayload; use hotshot_types::traits::{ block_contents::vid_commitment, signature_key::BuilderSignatureKey, BlockPayload, EncodeBytes, @@ -33,7 +31,7 @@ use hotshot_types::traits::{ use jf_merkle_tree::MerkleTreeScheme; use pretty_assertions::assert_eq; use rand::{Rng, RngCore}; -use sequencer_utils::commitment_to_u256; +use sequencer_utils::{commitment_to_u256, test_utils::setup_test}; use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value; use tagged_base64::TaggedBase64; @@ -43,11 +41,13 @@ use vbs::{ }; use crate::{ - ChainConfig, FeeAccount, FeeInfo, Header, L1BlockInfo, NamespaceId, NsTable, Payload, SeqTypes, + v0_1, FeeAccount, FeeInfo, Header, L1BlockInfo, NamespaceId, NsTable, Payload, SeqTypes, Transaction, ValidatedState, }; -type Serializer = vbs::Serializer; +type V1Serializer = vbs::Serializer>; +type V2Serializer = vbs::Serializer>; +type V3Serializer = vbs::Serializer>; async fn reference_payload() -> Payload { const NUM_NS_IDS: usize = 3; @@ -92,19 +92,23 @@ fn reference_l1_block() -> L1BlockInfo { const REFERENCE_L1_BLOCK_COMMITMENT: &str = "L1BLOCK~4HpzluLK2Isz3RdPNvNrDAyQcWOF2c9JeLZzVNLmfpQ9"; -fn reference_chain_config() -> ChainConfig { - ChainConfig { +fn reference_chain_config() -> crate::v0_3::ChainConfig { + crate::v0_3::ChainConfig { chain_id: 0x8a19.into(), max_block_size: 10240.into(), base_fee: 0.into(), fee_contract: Some(Default::default()), fee_recipient: Default::default(), + bid_recipient: Some(Default::default()), } } -const REFERENCE_CHAIN_CONFIG_COMMITMENT: &str = 
+const REFERENCE_V1_CHAIN_CONFIG_COMMITMENT: &str = "CHAIN_CONFIG~L6HmMktJbvnEGgpmRrsiYvQmIBstSj9UtDM7eNFFqYFO"; +const REFERENCE_V3_CHAIN_CONFIG_COMMITMENT: &str = + "CHAIN_CONFIG~1mJTBiaJ0Nyuu4Ir5IZTamyI8CjexbktPkRr6R1rtnGh"; + fn reference_fee_info() -> FeeInfo { FeeInfo::new( FeeAccount::from_str("0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266").unwrap(), @@ -132,7 +136,7 @@ async fn reference_header(version: Version) -> Header { let state = ValidatedState::default(); Header::create( - reference_chain_config().into(), + reference_chain_config(), 42, 789, 124, @@ -142,15 +146,15 @@ async fn reference_header(version: Version) -> Header { ns_table, state.fee_merkle_tree.commitment(), state.block_merkle_tree.commitment(), - fee_info, - Some(builder_signature), + vec![fee_info], + vec![builder_signature], version, ) } const REFERENCE_V1_HEADER_COMMITMENT: &str = "BLOCK~dh1KpdvvxSvnnPpOi2yI3DOg8h6ltr2Kv13iRzbQvtN2"; const REFERENCE_V2_HEADER_COMMITMENT: &str = "BLOCK~V0GJjL19nCrlm9n1zZ6gaOKEekSMCT6uR5P-h7Gi6UJR"; -const REFERENCE_V3_HEADER_COMMITMENT: &str = "BLOCK~X3j5MJWJrye2dKJv5uRLk5-z3augpUDftecBFO6dYahF"; +const REFERENCE_V3_HEADER_COMMITMENT: &str = "BLOCK~oqbUzqJdG4JfWCDpCQWsLDjb47Rx_OH6KVsKQFOl4S2n"; fn reference_transaction(ns_id: NamespaceId, rng: &mut R) -> Transaction where @@ -173,8 +177,7 @@ fn reference_test_without_committable V1Serializer::serialize(&reference).unwrap(), + "v2" => V2Serializer::serialize(&reference).unwrap(), + "v3" => V3Serializer::serialize(&reference).unwrap(), + _ => panic!("invalid version"), + }; if actual != expected { // Write the actual output to a file to make it easier to compare with/replace the expected // file if the serialization change was actually intended. @@ -247,7 +256,14 @@ change in the serialization of this data structure. } // Check that we can deserialize from the reference binary object. 
- let parsed: T = Serializer::deserialize(&expected).unwrap(); + // todo: (ab) cleanup + let parsed: T = match version { + "v1" => V1Serializer::deserialize(&expected).unwrap(), + "v2" => V2Serializer::deserialize(&expected).unwrap(), + "v3" => V3Serializer::deserialize(&expected).unwrap(), + _ => panic!("invalid version"), + }; + assert_eq!( *reference, parsed, "Reference object commitment does not match commitment of parsed binary object. This is @@ -261,8 +277,7 @@ fn reference_test( reference: T, commitment: &str, ) { - setup_logging(); - setup_backtrace(); + setup_test(); reference_test_without_committable(version, name, &reference); @@ -322,12 +337,22 @@ fn test_reference_l1_block() { } #[test] -fn test_reference_chain_config() { +fn test_reference_v1_chain_config() { reference_test( "v1", "chain_config", + v0_1::ChainConfig::from(reference_chain_config()), + REFERENCE_V1_CHAIN_CONFIG_COMMITMENT, + ); +} + +#[test] +fn test_reference_v3_chain_config() { + reference_test( + "v3", + "chain_config", reference_chain_config(), - REFERENCE_CHAIN_CONFIG_COMMITMENT, + REFERENCE_V3_CHAIN_CONFIG_COMMITMENT, ); } @@ -342,21 +367,27 @@ fn test_reference_fee_info() { } #[async_std::test] -async fn test_reference_header() { +async fn test_reference_header_v1() { reference_test( "v1", "header", reference_header(StaticVersion::<0, 1>::version()).await, REFERENCE_V1_HEADER_COMMITMENT, ); +} +#[async_std::test] +async fn test_reference_header_v2() { reference_test( "v2", "header", reference_header(StaticVersion::<0, 2>::version()).await, REFERENCE_V2_HEADER_COMMITMENT, ); +} +#[async_std::test] +async fn test_reference_header_v3() { reference_test( "v3", "header", @@ -364,7 +395,6 @@ async fn test_reference_header() { REFERENCE_V3_HEADER_COMMITMENT, ); } - #[test] fn test_reference_transaction() { reference_test( diff --git a/types/src/v0/header.rs b/types/src/v0/header.rs index 0083edb50..014d4938d 100644 --- a/types/src/v0/header.rs +++ b/types/src/v0/header.rs @@ -2,7 +2,10 
@@ use committable::Commitment; use serde::{Deserialize, Serialize}; use vbs::version::Version; -use crate::{v0_1, v0_2, v0_3, ChainConfig}; +use crate::{ + v0_1::{self, ChainConfig}, + v0_2, v0_3, +}; /// Each variant represents a specific minor version header. #[derive(Clone, Debug, Hash, PartialEq, Eq)] diff --git a/types/src/v0/impls/auction.rs b/types/src/v0/impls/auction.rs index 24899ae2a..916773c16 100644 --- a/types/src/v0/impls/auction.rs +++ b/types/src/v0/impls/auction.rs @@ -1,21 +1,25 @@ +use super::state::ValidatedState; use crate::{ eth_signature_key::{EthKeyPair, SigningError}, - v0_1::ValidatedState, - v0_3::{BidTx, BidTxBody, FullNetworkTx}, + v0_3::{BidTx, BidTxBody, FullNetworkTx, SolverAuctionResults}, FeeAccount, FeeAmount, FeeError, FeeInfo, NamespaceId, }; +use async_trait::async_trait; use committable::{Commitment, Committable}; use ethers::types::Signature; use hotshot_types::{ data::ViewNumber, traits::{ - auction_results_provider::HasUrls, node_implementation::ConsensusTime, + auction_results_provider::{AuctionResultsProvider, HasUrls}, + node_implementation::{ConsensusTime, NodeType}, signature_key::BuilderSignatureKey, }, }; use std::str::FromStr; use thiserror::Error; +use tide_disco::error::ServerError; use url::Url; +use vbs::version::StaticVersion; impl FullNetworkTx { /// Proxy for `execute` method of each transaction variant. @@ -93,11 +97,20 @@ impl BidTxBody { pub fn amount(&self) -> FeeAmount { self.bid_amount } + /// get the view number + pub fn view(&self) -> ViewNumber { + self.view + } /// Instantiate a `BidTxBody` containing the values of `self` /// with a new `url` field. pub fn with_url(self, url: Url) -> Self { Self { url, ..self } } + + /// Get the cloned `url` field. + fn url(&self) -> Url { + self.url.clone() + } } impl Default for BidTxBody { @@ -137,6 +150,9 @@ pub enum ExecutionError { #[error("Could not resolve `ChainConfig`")] /// Could not resolve `ChainConfig`. 
UnresolvableChainConfig, + #[error("Bid recipient not set on `ChainConfig`")] + /// Bid Recipient is not set on `ChainConfig` + BidRecipientNotFound, } impl From for ExecutionError { @@ -169,8 +185,9 @@ impl BidTx { return Err(ExecutionError::UnresolvableChainConfig); }; - // TODO change to `bid_recipient` when this logic is finally enabled - let recipient = chain_config.fee_recipient; + let Some(recipient) = chain_config.bid_recipient else { + return Err(ExecutionError::BidRecipientNotFound); + }; // Charge the bid amount state .charge_fee(FeeInfo::new(self.account(), self.amount()), recipient) @@ -213,19 +230,77 @@ impl BidTx { pub fn account(&self) -> FeeAccount { self.body.account } + /// get the view number + pub fn view(&self) -> ViewNumber { + self.body.view + } + /// Get the `url` field from the body. + fn url(&self) -> Url { + self.body.url() + } } -impl HasUrls for BidTx { - /// Get the `url` field from the body. - fn urls(&self) -> Vec { - self.body.urls() +impl SolverAuctionResults { + pub fn new( + view_number: ViewNumber, + winning_bids: Vec, + reserve_bids: Vec<(NamespaceId, Url)>, + ) -> Self { + Self { + view_number, + winning_bids, + reserve_bids, + } + } + + pub fn view(&self) -> ViewNumber { + self.view_number + } + + pub fn winning_bids(&self) -> &[BidTx] { + &self.winning_bids + } + pub fn reserve_bids(&self) -> &[(NamespaceId, Url)] { + &self.reserve_bids + } + pub fn genesis() -> Self { + Self { + view_number: ViewNumber::genesis(), + winning_bids: vec![], + reserve_bids: vec![], + } } } -impl HasUrls for BidTxBody { - /// Get the cloned `url` field. 
+impl HasUrls for SolverAuctionResults { fn urls(&self) -> Vec { - vec![self.url.clone()] + self.winning_bids() + .iter() + .map(|bid| bid.url()) + .chain(self.reserve_bids().iter().map(|bid| bid.1.clone())) + .collect() + } +} + +const SOLVER_URL: &str = "https://solver:1234"; +type Ver = StaticVersion<0, 3>; +type SurfClient = surf_disco::Client; + +#[async_trait] +impl AuctionResultsProvider for SolverAuctionResults { + type AuctionResult = SolverAuctionResults; + + /// Fetch the auction results. + async fn fetch_auction_result( + &self, + view_number: TYPES::Time, + ) -> anyhow::Result { + let resp = SurfClient::::new(Url::from_str(SOLVER_URL).unwrap()) + .get::(&format!("/v0/api/auction_results/{}", *view_number)) + .send() + .await + .unwrap(); + Ok(resp) } } @@ -246,6 +321,7 @@ mod test { } #[test] + #[ignore] // TODO enable after upgrade to v3 fn test_mock_bid_tx_charge() { let mut state = ValidatedState::default(); let key = FeeAccount::test_key_pair(); diff --git a/types/src/v0/impls/block/full_payload/ns_table/test.rs b/types/src/v0/impls/block/full_payload/ns_table/test.rs index 6379bff2a..eb2bff488 100644 --- a/types/src/v0/impls/block/full_payload/ns_table/test.rs +++ b/types/src/v0/impls/block/full_payload/ns_table/test.rs @@ -1,6 +1,6 @@ -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot::traits::BlockPayload; use rand::{Rng, RngCore}; +use sequencer_utils::test_utils::setup_test; use crate::{ v0::impls::block::{ @@ -17,8 +17,7 @@ use crate::{ #[test] fn random_valid() { - setup_logging(); - setup_backtrace(); + setup_test(); let mut rng = jf_utils::test_rng(); for num_entries in 0..20 { @@ -28,8 +27,7 @@ fn random_valid() { #[test] fn ns_table_byte_len() { - setup_logging(); - setup_backtrace(); + setup_test(); let mut rng = jf_utils::test_rng(); // Extremely small byte lengths should get rejected. 
@@ -64,8 +62,7 @@ fn ns_table_byte_len() { #[async_std::test] async fn payload_byte_len() { - setup_logging(); - setup_backtrace(); + setup_test(); let test_case = vec![vec![5, 8, 8], vec![7, 9, 11], vec![10, 5, 8]]; let mut rng = jf_utils::test_rng(); let test = ValidTest::from_tx_lengths(test_case, &mut rng); @@ -128,8 +125,7 @@ async fn payload_byte_len() { #[test] fn monotonic_increase() { - setup_logging(); - setup_backtrace(); + setup_test(); // Duplicate namespace ID two_entries_ns_table((5, 5), (5, 6), Some(DuplicateNamespaceId)); @@ -171,8 +167,7 @@ fn monotonic_increase() { // https://github.com/EspressoSystems/espresso-sequencer/issues/1604 #[test] fn header() { - setup_logging(); - setup_backtrace(); + setup_test(); let mut rng = jf_utils::test_rng(); for num_entries in 0..20 { diff --git a/types/src/v0/impls/block/full_payload/payload.rs b/types/src/v0/impls/block/full_payload/payload.rs index 4d705a045..87d21178e 100644 --- a/types/src/v0/impls/block/full_payload/payload.rs +++ b/types/src/v0/impls/block/full_payload/payload.rs @@ -12,9 +12,10 @@ use jf_vid::VidScheme; use sha2::Digest; use crate::{ - ChainConfig, Index, Iter, NamespaceId, NodeState, NsIndex, NsPayload, NsPayloadBuilder, - NsPayloadRange, NsTable, NsTableBuilder, Payload, PayloadByteLen, SeqTypes, Transaction, - TxProof, ValidatedState, + v0::impls::{NodeState, ValidatedState}, + v0_1::ChainConfig, + Index, Iter, NamespaceId, NsIndex, NsPayload, NsPayloadBuilder, NsPayloadRange, NsTable, + NsTableBuilder, Payload, PayloadByteLen, SeqTypes, Transaction, TxProof, }; impl Payload { @@ -146,7 +147,11 @@ impl BlockPayload for Payload { } }; - Self::from_transactions_sync(transactions, chain_config, instance_state) + Self::from_transactions_sync( + transactions, + ChainConfig::from(chain_config), + instance_state, + ) } // TODO avoid cloning the entire payload here? 
diff --git a/types/src/v0/impls/block/test.rs b/types/src/v0/impls/block/test.rs old mode 100644 new mode 100755 index d919c4434..b75b10537 --- a/types/src/v0/impls/block/test.rs +++ b/types/src/v0/impls/block/test.rs @@ -1,15 +1,15 @@ #![cfg(test)] use std::collections::BTreeMap; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot::traits::BlockPayload; use hotshot_query_service::availability::QueryablePayload; use hotshot_types::{traits::EncodeBytes, vid::vid_scheme}; use jf_vid::VidScheme; use rand::RngCore; +use sequencer_utils::test_utils::setup_test; use crate::{ - BlockSize, ChainConfig, NamespaceId, NodeState, NsProof, Payload, Transaction, TxProof, + v0_3::ChainConfig, BlockSize, NamespaceId, NodeState, NsProof, Payload, Transaction, TxProof, ValidatedState, }; @@ -20,8 +20,7 @@ async fn basic_correctness() { vec![vec![5, 8, 8], vec![7, 9, 11], vec![10, 5, 8]], // 3 non-empty namespaces ]; - setup_logging(); - setup_backtrace(); + setup_test(); let mut rng = jf_utils::test_rng(); let valid_tests = ValidTest::many_from_tx_lengths(test_cases, &mut rng); @@ -106,8 +105,7 @@ async fn basic_correctness() { #[async_std::test] async fn enforce_max_block_size() { - setup_logging(); - setup_backtrace(); + setup_test(); let test_case = vec![vec![5, 8, 8], vec![7, 9, 11], vec![10, 5, 8]]; let payload_byte_len_expected: usize = 119; let ns_table_byte_len_expected: usize = 28; diff --git a/types/src/v0/impls/chain_config.rs b/types/src/v0/impls/chain_config.rs index 1593dbf45..1c664cd22 100644 --- a/types/src/v0/impls/chain_config.rs +++ b/types/src/v0/impls/chain_config.rs @@ -1,16 +1,13 @@ -use std::str::FromStr; - use bytesize::ByteSize; -use committable::{Commitment, Committable}; use derive_more::From; use ethers::types::U256; -use itertools::Either; use sequencer_utils::{ impl_serde_from_string_or_integer, impl_to_fixed_bytes, ser::FromStringOrInteger, }; use snafu::Snafu; +use std::str::FromStr; -use crate::{BlockSize, 
ChainConfig, ChainId, ResolvableChainConfig}; +use crate::{BlockSize, ChainId}; impl_serde_from_string_or_integer!(ChainId); impl_to_fixed_bytes!(ChainId, U256); @@ -77,69 +74,6 @@ impl FromStringOrInteger for BlockSize { } } -impl Default for ChainConfig { - fn default() -> Self { - Self { - chain_id: U256::from(35353).into(), // arbitrarily chosen chain ID - max_block_size: 30720.into(), - base_fee: 0.into(), - fee_contract: None, - fee_recipient: Default::default(), - } - } -} - -impl Committable for ChainConfig { - fn tag() -> String { - "CHAIN_CONFIG".to_string() - } - - fn commit(&self) -> Commitment { - let comm = committable::RawCommitmentBuilder::new(&Self::tag()) - .fixed_size_field("chain_id", &self.chain_id.to_fixed_bytes()) - .u64_field("max_block_size", *self.max_block_size) - .fixed_size_field("base_fee", &self.base_fee.to_fixed_bytes()) - .fixed_size_field("fee_recipient", &self.fee_recipient.to_fixed_bytes()); - let comm = if let Some(addr) = self.fee_contract { - comm.u64_field("fee_contract", 1).fixed_size_bytes(&addr.0) - } else { - comm.u64_field("fee_contract", 0) - }; - comm.finalize() - } -} - -impl ResolvableChainConfig { - pub fn commit(&self) -> Commitment { - match self.chain_config { - Either::Left(config) => config.commit(), - Either::Right(commitment) => commitment, - } - } - pub fn resolve(self) -> Option { - match self.chain_config { - Either::Left(config) => Some(config), - Either::Right(_) => None, - } - } -} - -impl From> for ResolvableChainConfig { - fn from(value: Commitment) -> Self { - Self { - chain_config: Either::Right(value), - } - } -} - -impl From for ResolvableChainConfig { - fn from(value: ChainConfig) -> Self { - Self { - chain_config: Either::Left(value), - } - } -} - #[derive(Clone, Debug, From, Snafu)] pub struct ParseSizeError { msg: String, @@ -151,6 +85,8 @@ pub fn parse_size(s: &str) -> Result { #[cfg(test)] mod tests { + use crate::v0_3::{ChainConfig, ResolvableChainConfig}; + use super::*; #[test] diff --git 
a/types/src/v0/impls/fee_info.rs b/types/src/v0/impls/fee_info.rs index f4fe0acf6..ca9e5f625 100644 --- a/types/src/v0/impls/fee_info.rs +++ b/types/src/v0/impls/fee_info.rs @@ -14,6 +14,7 @@ use ethers::{ }; use hotshot_query_service::explorer::MonetaryValue; use hotshot_types::traits::block_contents::BuilderFee; +use itertools::Itertools; use jf_merkle_tree::{ ForgetableMerkleTreeScheme, ForgetableUniversalMerkleTreeScheme, LookupResult, MerkleCommitment, MerkleTreeError, MerkleTreeScheme, ToTraversalPath, @@ -26,8 +27,9 @@ use sequencer_utils::{ use thiserror::Error; use crate::{ - eth_signature_key::EthKeyPair, AccountQueryData, FeeAccount, FeeAccountProof, FeeAmount, - FeeInfo, FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, SeqTypes, + eth_signature_key::EthKeyPair, v0_3::IterableFeeInfo, AccountQueryData, FeeAccount, + FeeAccountProof, FeeAmount, FeeInfo, FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, + SeqTypes, }; /// Possible charge fee failures @@ -73,6 +75,48 @@ impl FeeInfo { pub fn amount(&self) -> FeeAmount { self.amount } + /// Get a `Vec` from `Vec` + pub fn from_builder_fees(fees: Vec>) -> Vec { + fees.into_iter().map(FeeInfo::from).collect() + } +} + +impl IterableFeeInfo for Vec { + /// Get sum of fee amounts + fn amount(&self) -> Option { + self.iter() + // getting the u64 tests that the value fits + .map(|fee_info| fee_info.amount.as_u64()) + .collect::>>() + .and_then(|amounts| amounts.iter().try_fold(0u64, |acc, n| acc.checked_add(*n))) + .map(FeeAmount::from) + } + + /// Get a `Vec` of all unique fee accounts + fn accounts(&self) -> Vec { + self.iter() + .unique_by(|entry| &entry.account) + .map(|entry| entry.account) + .collect() + } +} + +impl IterableFeeInfo for Vec> { + /// Get sum of amounts + fn amount(&self) -> Option { + self.iter() + .map(|fee_info| fee_info.fee_amount) + .try_fold(0u64, |acc, n| acc.checked_add(n)) + .map(FeeAmount::from) + } + + /// Get a `Vec` of all unique fee accounts + fn accounts(&self) -> Vec { + 
self.iter() + .unique_by(|entry| &entry.fee_account) + .map(|entry| entry.fee_account) + .collect() + } } impl From> for FeeInfo { @@ -375,3 +419,26 @@ impl From<(FeeAccountProof, U256)> for AccountQueryData { Self { balance, proof } } } + +#[cfg(test)] +mod test { + use ethers::abi::Address; + + use crate::{FeeAccount, FeeAmount, FeeInfo}; + + use super::IterableFeeInfo; + + #[test] + fn test_iterable_fee_info() { + let addr = Address::zero(); + let fee = FeeInfo::new(addr, FeeAmount::from(1)); + let fees = vec![fee, fee, fee]; + // check the sum of amounts + let sum = fees.amount().unwrap(); + assert_eq!(FeeAmount::from(3), sum); + + // check accounts collector + let accounts = fees.accounts(); + assert_eq!(vec![FeeAccount::from(Address::zero())], accounts); + } +} diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index 841e47f25..1b1068b2e 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -1,5 +1,3 @@ -use std::fmt; - use anyhow::{ensure, Context}; use ark_serialize::CanonicalSerialize; use committable::{Commitment, Committable, RawCommitmentBuilder}; @@ -21,18 +19,22 @@ use serde::{ }; use serde_json::{Map, Value}; use snafu::Snafu; +use std::fmt; use thiserror::Error; use time::OffsetDateTime; use vbs::version::Version; use crate::{ v0::header::{EitherOrVersion, VersionedHeader}, - v0_1, v0_2, v0_3, BlockMerkleCommitment, BlockSize, BuilderSignature, ChainConfig, FeeAccount, - FeeAmount, FeeInfo, FeeMerkleCommitment, Header, L1BlockInfo, L1Snapshot, Leaf, NamespaceId, - NodeState, NsTable, NsTableValidationError, ResolvableChainConfig, SeqTypes, UpgradeType, - ValidatedState, + v0_1, v0_2, + v0_3::{self, ChainConfig, IterableFeeInfo, SolverAuctionResults}, + BlockMerkleCommitment, BlockSize, BuilderSignature, FeeAccount, FeeAmount, FeeInfo, + FeeMerkleCommitment, Header, L1BlockInfo, L1Snapshot, Leaf, NamespaceId, NsTable, + NsTableValidationError, SeqTypes, UpgradeType, }; +use 
super::{instance_state::NodeState, state::ValidatedState}; + /// Possible proposal validation failures #[derive(Error, Debug, Eq, PartialEq)] pub enum ProposalValidationError { @@ -69,6 +71,8 @@ pub enum ProposalValidationError { }, #[error("Invalid namespace table: {err}")] InvalidNsTable { err: NsTableValidationError }, + #[error("Some fee amount or their sum total out of range")] + SomeFeeAmountOutOfRange, } impl v0_1::Header { @@ -281,7 +285,7 @@ impl<'de> Deserialize<'de> for Header { impl Header { #[allow(clippy::too_many_arguments)] pub(crate) fn create( - chain_config: ResolvableChainConfig, + chain_config: ChainConfig, height: u64, timestamp: u64, l1_head: u64, @@ -291,18 +295,22 @@ impl Header { ns_table: NsTable, fee_merkle_tree_root: FeeMerkleCommitment, block_merkle_tree_root: BlockMerkleCommitment, - fee_info: FeeInfo, - builder_signature: Option, + fee_info: Vec, + builder_signature: Vec, version: Version, ) -> Self { let Version { major, minor } = version; // Ensure the major version is 0, otherwise panic assert!(major == 0, "Invalid major version {major}"); + // Ensure FeeInfo contains at least 1 element + assert!(!fee_info.is_empty(), "Invalid fee_info length: 0"); match minor { 1 => Self::V1(v0_1::Header { - chain_config, + chain_config: v0_1::ResolvableChainConfig::from(v0_1::ChainConfig::from( + chain_config, + )), height, timestamp, l1_head, @@ -312,11 +320,13 @@ impl Header { ns_table, block_merkle_tree_root, fee_merkle_tree_root, - fee_info, - builder_signature, + fee_info: fee_info[0], // NOTE this is asserted to exist above + builder_signature: builder_signature.first().copied(), }), 2 => Self::V2(v0_2::Header { - chain_config, + chain_config: v0_1::ResolvableChainConfig::from(v0_1::ChainConfig::from( + chain_config, + )), height, timestamp, l1_head, @@ -326,11 +336,11 @@ impl Header { ns_table, block_merkle_tree_root, fee_merkle_tree_root, - fee_info, - builder_signature, + fee_info: fee_info[0], // NOTE this is asserted to exist above + 
builder_signature: builder_signature.first().copied(), }), 3 => Self::V3(v0_3::Header { - chain_config, + chain_config: v0_3::ResolvableChainConfig::from(chain_config), height, timestamp, l1_head, @@ -342,6 +352,7 @@ impl Header { fee_merkle_tree_root, fee_info, builder_signature, + auction_results: SolverAuctionResults::genesis(), }), // This case should never occur // but if it does, we must panic @@ -376,16 +387,17 @@ impl Header { #[allow(clippy::too_many_arguments)] fn from_info( payload_commitment: VidCommitment, - builder_commitment: BuilderCommitment, + builder_commitment: Option, ns_table: NsTable, parent_leaf: &Leaf, mut l1: L1Snapshot, l1_deposits: &[FeeInfo], - builder_fee: BuilderFee, + builder_fee: Vec>, mut timestamp: u64, mut state: ValidatedState, chain_config: ChainConfig, version: Version, + auction_results: Option, ) -> anyhow::Result { ensure!( version.major == 0, @@ -450,40 +462,94 @@ impl Header { .context(format!("missing fee account {}", fee_info.account()))?; } - // Charge the builder fee. - ensure!( - builder_fee.fee_account.validate_fee_signature( - &builder_fee.fee_signature, - builder_fee.fee_amount, - &ns_table, - &payload_commitment, - ), - "invalid builder signature, account: {}, fee: {builder_fee:?}, ns_table: {ns_table:?}, payload_commitment: {payload_commitment}", - builder_fee.fee_account, - ); - let builder_signature = Some(builder_fee.fee_signature); - let fee_info = builder_fee.into(); - state - .charge_fee(fee_info, chain_config.fee_recipient) - .context(format!("invalid builder fee {fee_info:?}"))?; + // Validate and charge the builder fee. 
+ for BuilderFee { + fee_account, + fee_signature, + fee_amount, + } in &builder_fee + { + ensure!( + fee_account.validate_fee_signature( + fee_signature, + *fee_amount, + &ns_table, + &payload_commitment, + ), + "invalid builder signature" + ); + + let fee_info = FeeInfo::new(*fee_account, *fee_amount); + state + .charge_fee(fee_info, chain_config.fee_recipient) + .context(format!("invalid builder fee {fee_info:?}"))?; + } + + let fee_info = FeeInfo::from_builder_fees(builder_fee.clone()); + + let builder_signature: Vec = + builder_fee.iter().map(|e| e.fee_signature).collect(); let fee_merkle_tree_root = state.fee_merkle_tree.commitment(); - Ok(Self::create( - chain_config.commit().into(), - height, - timestamp, - l1.head, - l1.finalized, - payload_commitment, - builder_commitment, - ns_table, - fee_merkle_tree_root, - block_merkle_tree_root, - fee_info, - builder_signature, - version, - )) + let Version { major, minor } = version; + + assert!(major == 0, "Invalid major version {major}"); + + let header = match minor { + 1 => Self::V1(v0_1::Header { + chain_config: v0_1::ResolvableChainConfig::from(v0_1::ChainConfig::from( + chain_config, + )), + height, + timestamp, + l1_head: l1.head, + l1_finalized: l1.finalized, + payload_commitment, + builder_commitment: builder_commitment.unwrap(), + ns_table, + block_merkle_tree_root, + fee_merkle_tree_root, + fee_info: fee_info[0], + builder_signature: builder_signature.first().copied(), + }), + 2 => Self::V2(v0_2::Header { + chain_config: v0_1::ResolvableChainConfig::from(v0_1::ChainConfig::from( + chain_config, + )), + height, + timestamp, + l1_head: l1.head, + l1_finalized: l1.finalized, + payload_commitment, + builder_commitment: builder_commitment.unwrap(), + ns_table, + block_merkle_tree_root, + fee_merkle_tree_root, + fee_info: fee_info[0], + builder_signature: builder_signature.first().copied(), + }), + 3 => Self::V3(v0_3::Header { + chain_config: chain_config.into(), + height, + timestamp, + l1_head: l1.head, + 
l1_finalized: l1.finalized, + payload_commitment, + builder_commitment: builder_commitment.unwrap(), + ns_table, + block_merkle_tree_root, + fee_merkle_tree_root, + fee_info, + builder_signature, + auction_results: auction_results.unwrap(), + }), + // This case should never occur + // but if it does, we must panic + // because we don't have the versioned types for this version + _ => panic!("invalid version: {version}"), + }; + Ok(header) } async fn get_chain_config( @@ -514,8 +580,12 @@ impl Header { impl Header { /// A commitment to a ChainConfig or a full ChainConfig. - pub fn chain_config(&self) -> &ResolvableChainConfig { - field!(self.chain_config) + pub fn chain_config(&self) -> v0_3::ResolvableChainConfig { + match self { + Self::V1(fields) => v0_3::ResolvableChainConfig::from(&fields.chain_config), + Self::V2(fields) => v0_3::ResolvableChainConfig::from(&fields.chain_config), + Self::V3(fields) => fields.chain_config, + } } pub fn height(&self) -> u64 { @@ -627,8 +697,12 @@ impl Header { } /// Fee paid by the block builder - pub fn fee_info(&self) -> FeeInfo { - *field!(self.fee_info) + pub fn fee_info(&self) -> Vec { + match self { + Self::V1(fields) => vec![fields.fee_info], + Self::V2(fields) => vec![fields.fee_info], + Self::V3(fields) => fields.fee_info.clone(), + } } /// Account (etheruem address) of builder @@ -639,8 +713,16 @@ impl Header { /// checked during consensus, any downstream client who has a proof of consensus finality of a /// header can trust that [`fee_info`](Self::fee_info) is correct without relying on the /// signature. Thus, this signature is not included in the header commitment. - pub fn builder_signature(&self) -> Option { - *field!(self.builder_signature) + pub fn builder_signature(&self) -> Vec { + match self { + // Previously we used `Option` to + // represent presence/absence of signature. 
The simplest + // way to represent the same now that we have a `Vec` is + // empty/non-empty + Self::V1(fields) => fields.builder_signature.as_slice().to_vec(), + Self::V2(fields) => fields.builder_signature.as_slice().to_vec(), + Self::V3(fields) => fields.builder_signature.clone(), + } } } @@ -663,6 +745,16 @@ impl From for InvalidBlockHeader { impl BlockHeader for Header { type Error = InvalidBlockHeader; + type AuctionResult = SolverAuctionResults; + + /// Get the results of the auction for this Header. Only used in post-marketplace versions + fn get_auction_results(&self) -> Option { + match self { + Self::V1(_) => None, + Self::V2(_) => None, + Self::V3(fields) => Some(fields.auction_results.clone()), + } + } #[tracing::instrument( skip_all, @@ -673,7 +765,128 @@ impl BlockHeader for Header { ), )] - async fn new( + /// Build a header with the parent validate state, instance-level state, parent leaf, payload + /// commitment, metadata, and auction results. This is only used in post-marketplace versions + async fn new_marketplace( + parent_state: &::ValidatedState, + instance_state: &<::ValidatedState as hotshot_types::traits::ValidatedState>::Instance, + parent_leaf: &hotshot_types::data::Leaf, + payload_commitment: VidCommitment, + metadata: <::BlockPayload as BlockPayload>::Metadata, + builder_fee: Vec>, + _vid_common: VidCommon, + auction_results: Option, + version: Version, + ) -> Result { + let height = parent_leaf.height(); + let view = parent_leaf.view_number(); + + let mut validated_state = parent_state.clone(); + + let chain_config = if version > instance_state.current_version { + match instance_state + .upgrades + .get(&version) + .map(|upgrade| match upgrade.upgrade_type { + UpgradeType::ChainConfig { chain_config } => chain_config, + }) { + Some(cf) => cf, + None => Header::get_chain_config(&validated_state, instance_state).await, + } + } else { + Header::get_chain_config(&validated_state, instance_state).await + }; + + 
validated_state.chain_config = chain_config.into(); + + // Fetch the latest L1 snapshot. + let l1_snapshot = instance_state.l1_client.snapshot().await; + // Fetch the new L1 deposits between parent and current finalized L1 block. + let l1_deposits = if let (Some(addr), Some(block_info)) = + (chain_config.fee_contract, l1_snapshot.finalized) + { + instance_state + .l1_client + .get_finalized_deposits( + addr, + parent_leaf + .block_header() + .l1_finalized() + .map(|block_info| block_info.number), + block_info.number, + ) + .await + } else { + vec![] + }; + // Find missing fee state entries. We will need to use the builder account which is paying a + // fee and the recipient account which is receiving it, plus any counts receiving deposits + // in this block. + + let missing_accounts = parent_state.forgotten_accounts( + [chain_config.fee_recipient] + .into_iter() + .chain(builder_fee.accounts()) + .chain(l1_deposits.accounts()), + ); + + if !missing_accounts.is_empty() { + tracing::warn!( + height, + ?view, + ?missing_accounts, + "fetching missing accounts from peers" + ); + + // Fetch missing fee state entries + let missing_account_proofs = instance_state + .peers + .as_ref() + .fetch_accounts( + height, + view, + parent_state.fee_merkle_tree.commitment(), + missing_accounts, + ) + .await?; + + // Insert missing fee state entries + for account in missing_account_proofs.iter() { + account + .proof + .remember(&mut validated_state.fee_merkle_tree) + .context("remembering fee account")?; + } + } + + // Ensure merkle tree has frontier + if validated_state.need_to_fetch_blocks_mt_frontier() { + tracing::warn!(height, ?view, "fetching block frontier from peers"); + instance_state + .peers + .as_ref() + .remember_blocks_merkle_tree(height, view, &mut validated_state.block_merkle_tree) + .await + .context("remembering block proof")?; + } + + Ok(Self::from_info( + payload_commitment, + None, + metadata, + parent_leaf, + l1_snapshot, + &l1_deposits, + builder_fee, + 
OffsetDateTime::now_utc().unix_timestamp() as u64, + validated_state, + chain_config, + version, + auction_results, + )?) + } + + async fn new_legacy( parent_state: &ValidatedState, instance_state: &NodeState, parent_leaf: &Leaf, @@ -775,16 +988,17 @@ impl BlockHeader for Header { Ok(Self::from_info( payload_commitment, - builder_commitment, + Some(builder_commitment), metadata, parent_leaf, l1_snapshot, &l1_deposits, - builder_fee, + vec![builder_fee], OffsetDateTime::now_utc().unix_timestamp() as u64, validated_state, chain_config, version, + None, )?) } @@ -805,7 +1019,7 @@ impl BlockHeader for Header { // The Header is versioned, // so we create the genesis header for the current version of the sequencer. Self::create( - instance_state.chain_config.into(), + instance_state.chain_config, 0, instance_state.genesis_header.timestamp.unix_timestamp(), instance_state @@ -818,8 +1032,8 @@ impl BlockHeader for Header { ns_table.clone(), fee_merkle_tree_root, block_merkle_tree_root, - FeeInfo::genesis(), - None, + vec![FeeInfo::genesis()], + vec![], instance_state.current_version, ) } @@ -852,20 +1066,22 @@ impl QueryableHeader for Header { impl ExplorerHeader for Header { type BalanceAmount = FeeAmount; - type WalletAddress = FeeAccount; - type ProposerId = FeeAccount; + type WalletAddress = Vec; + type ProposerId = Vec; type NamespaceId = NamespaceId; + // TODO what are these expected values w/ multiple Fees fn proposer_id(&self) -> Self::ProposerId { - self.fee_info().account() + self.fee_info().accounts() } fn fee_info_account(&self) -> Self::WalletAddress { - self.fee_info().account() + self.fee_info().accounts() } fn fee_info_balance(&self) -> Self::BalanceAmount { - self.fee_info().amount() + // TODO this will panic if some amount or total does not fit in a u64 + self.fee_info().amount().unwrap() } /// reward_balance at the moment is only implemented as a stub, as block @@ -889,13 +1105,13 @@ impl ExplorerHeader for Header { mod test_headers { use std::sync::Arc; 
- use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use ethers::{ types::{Address, U256}, utils::Anvil, }; use hotshot_types::{traits::signature_key::BuilderSignatureKey, vid::vid_scheme}; use jf_vid::VidScheme; + use sequencer_utils::test_utils::setup_test; use v0_1::{BlockMerkleTree, FeeMerkleTree, L1Client}; use vbs::{ bincode_serializer::BincodeSerializer, @@ -906,7 +1122,7 @@ mod test_headers { use super::*; use crate::{ eth_signature_key::EthKeyPair, v0::impls::instance_state::mock::MockStateCatchup, - validate_proposal, NodeState, + validate_proposal, }; #[derive(Debug, Default)] @@ -931,8 +1147,7 @@ mod test_headers { impl TestCase { async fn run(self) { - setup_logging(); - setup_backtrace(); + setup_test(); // Check test case validity. assert!(self.expected_timestamp >= self.parent_timestamp); @@ -975,7 +1190,7 @@ mod test_headers { let header = Header::from_info( genesis.header.payload_commitment(), - genesis.header.builder_commitment().clone(), + Some(genesis.header.builder_commitment().clone()), genesis.ns_table, &parent_leaf, L1Snapshot { @@ -983,15 +1198,16 @@ mod test_headers { finalized: self.l1_finalized, }, &self.l1_deposits, - BuilderFee { + vec![BuilderFee { fee_account, fee_amount, fee_signature, - }, + }], self.timestamp, validated_state.clone(), genesis.instance_state.chain_config, Version { major: 0, minor: 1 }, + None, ) .unwrap(); assert_eq!(header.height(), parent.height() + 1); @@ -1289,8 +1505,7 @@ mod test_headers { #[async_std::test] async fn test_validate_proposal_success() { - setup_logging(); - setup_backtrace(); + setup_test(); let anvil = Anvil::new().block_time(1u32).spawn(); let mut genesis_state = @@ -1340,7 +1555,7 @@ mod test_headers { fee_account: key_pair.fee_account(), fee_signature, }; - let proposal = Header::new( + let proposal = Header::new_legacy( &forgotten_state, &genesis_state, &parent_leaf, @@ -1407,8 +1622,7 @@ mod test_headers { #[async_std::test] async fn 
test_versioned_header_serialization() { - setup_logging(); - setup_backtrace(); + setup_test(); let genesis = GenesisForTest::default().await; let header = genesis.header.clone(); @@ -1417,7 +1631,7 @@ mod test_headers { let (fee_account, _) = FeeAccount::generated_from_seed_indexed([0; 32], 0); let v1_header = Header::create( - genesis.instance_state.chain_config.into(), + genesis.instance_state.chain_config, 1, 2, 3, @@ -1427,10 +1641,10 @@ mod test_headers { ns_table.clone(), header.fee_merkle_tree_root(), header.block_merkle_tree_root(), - FeeInfo { + vec![FeeInfo { amount: 0.into(), account: fee_account, - }, + }], Default::default(), Version { major: 0, minor: 1 }, ); @@ -1440,7 +1654,7 @@ mod test_headers { assert_eq!(v1_header, deserialized); let v2_header = Header::create( - genesis.instance_state.chain_config.into(), + genesis.instance_state.chain_config, 1, 2, 3, @@ -1450,10 +1664,10 @@ mod test_headers { ns_table.clone(), header.fee_merkle_tree_root(), header.block_merkle_tree_root(), - FeeInfo { + vec![FeeInfo { amount: 0.into(), account: fee_account, - }, + }], Default::default(), Version { major: 0, minor: 2 }, ); @@ -1463,7 +1677,7 @@ mod test_headers { assert_eq!(v2_header, deserialized); let v3_header = Header::create( - genesis.instance_state.chain_config.into(), + genesis.instance_state.chain_config, 1, 2, 3, @@ -1473,10 +1687,10 @@ mod test_headers { ns_table.clone(), header.fee_merkle_tree_root(), header.block_merkle_tree_root(), - FeeInfo { + vec![FeeInfo { amount: 0.into(), account: fee_account, - }, + }], Default::default(), Version { major: 0, minor: 3 }, ); diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index adf2c15e7..89e78ddf6 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -1,15 +1,44 @@ -use std::{collections::BTreeMap, sync::Arc}; - -use hotshot_types::{ - traits::{node_implementation::NodeType, states::InstanceState}, - HotShotConfig, +use 
crate::{ + v0::traits::StateCatchup, v0_3::ChainConfig, GenesisHeader, L1BlockInfo, L1Client, PubKey, + SeqTypes, Timestamp, Upgrade, UpgradeMode, }; +use hotshot_types::traits::{node_implementation::NodeType, states::InstanceState}; +use hotshot_types::HotShotConfig; +use std::{collections::BTreeMap, sync::Arc}; use vbs::version::{StaticVersionType, Version}; -use crate::{ - v0::traits::StateCatchup, ChainConfig, L1Client, NodeState, PubKey, SeqTypes, Timestamp, - Upgrade, UpgradeMode, ValidatedState, -}; +use super::state::ValidatedState; + +/// Represents the immutable state of a node. +/// +/// For mutable state, use `ValidatedState`. +#[derive(Debug, Clone)] +pub struct NodeState { + pub node_id: u64, + pub chain_config: crate::v0_3::ChainConfig, + pub l1_client: L1Client, + pub peers: Arc, + pub genesis_header: GenesisHeader, + pub genesis_state: ValidatedState, + pub l1_genesis: Option, + + /// Map containing all planned and executed upgrades. + /// + /// Currently, only one upgrade can be executed at a time. + /// For multiple upgrades, the node needs to be restarted after each upgrade. + /// + /// This field serves as a record for planned and past upgrades, + /// listed in the genesis TOML file. It will be very useful if multiple upgrades + /// are supported in the future. + pub upgrades: BTreeMap, + /// Current version of the sequencer. + /// + /// This version is checked to determine if an upgrade is planned, + /// and which version variant for versioned types + /// to use in functions such as genesis. 
+ /// (example: genesis returns V2 Header if version is 0.2) + pub current_version: Version, +} impl NodeState { pub fn new( diff --git a/types/src/v0/impls/l1.rs b/types/src/v0/impls/l1.rs index 771160abb..85e4a9be4 100644 --- a/types/src/v0/impls/l1.rs +++ b/types/src/v0/impls/l1.rs @@ -253,20 +253,18 @@ async fn get_finalized_block( #[cfg(test)] mod test { - use std::ops::Add; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use contract_bindings::fee_contract::FeeContract; use ethers::utils::{parse_ether, Anvil}; + use sequencer_utils::test_utils::setup_test; use super::*; use crate::NodeState; #[async_std::test] async fn test_l1_block_fetching() -> anyhow::Result<()> { - setup_logging(); - setup_backtrace(); + setup_test(); // Test l1_client methods against `ethers::Provider`. There is // also some sanity testing demonstrating `Anvil` availability. @@ -303,8 +301,7 @@ mod test { #[async_std::test] async fn test_get_finalized_deposits() -> anyhow::Result<()> { - setup_logging(); - setup_backtrace(); + setup_test(); // how many deposits will we make let deposits = 5; @@ -446,8 +443,7 @@ mod test { #[async_std::test] async fn test_wait_for_finalized_block() { - setup_logging(); - setup_backtrace(); + setup_test(); let anvil = Anvil::new().block_time(1u32).spawn(); let l1_client = L1Client::new(anvil.endpoint().parse().unwrap(), 1); diff --git a/types/src/v0/impls/mod.rs b/types/src/v0/impls/mod.rs index 91aad8595..9b02a921a 100644 --- a/types/src/v0/impls/mod.rs +++ b/types/src/v0/impls/mod.rs @@ -7,10 +7,11 @@ mod fee_info; mod header; mod instance_state; mod l1; +mod solver; mod state; mod transaction; pub use fee_info::FeeError; pub use header::ProposalValidationError; -pub use instance_state::mock; -pub use state::{validate_proposal, BuilderValidationError, StateValidationError}; +pub use instance_state::{mock, NodeState}; +pub use state::{validate_proposal, BuilderValidationError, StateValidationError, ValidatedState}; diff --git 
a/types/src/v0/impls/solver.rs b/types/src/v0/impls/solver.rs new file mode 100644 index 000000000..fb40e5d30 --- /dev/null +++ b/types/src/v0/impls/solver.rs @@ -0,0 +1,67 @@ +use committable::{Commitment, Committable}; +use hotshot::types::SignatureKey; + +use super::v0_3::{RollupRegistrationBody, RollupUpdatebody}; + +impl Committable for RollupRegistrationBody { + fn tag() -> String { + "ROLLUP_REGISTRATION".to_string() + } + + fn commit(&self) -> Commitment { + let mut comm = committable::RawCommitmentBuilder::new(&Self::tag()) + .u64_field("namespace_id", u64::from(self.namespace_id)) + .var_size_field("reserve_url", self.reserve_url.as_str().as_ref()) + .fixed_size_field("reserve_price", &self.reserve_price.to_fixed_bytes()) + .fixed_size_field("active", &[u8::from(self.active)]) + .constant_str("signature_keys"); + + for key in self.signature_keys.iter() { + comm = comm.var_size_bytes(&key.to_bytes()); + } + + comm = comm + .var_size_field("signature_key", &self.signature_key.to_bytes()) + .var_size_field("text", self.text.as_bytes()); + + comm.finalize() + } +} + +impl Committable for RollupUpdatebody { + fn tag() -> String { + "ROLLUP_UPDATE".to_string() + } + + fn commit(&self) -> Commitment { + let mut comm = committable::RawCommitmentBuilder::new(&Self::tag()) + .u64_field("namespace_id", u64::from(self.namespace_id)); + + if let Some(reserve_url) = &self.reserve_url { + comm = comm.var_size_field("reserve_url", reserve_url.as_str().as_ref()) + } + + if let Some(rp) = self.reserve_price { + comm = comm.fixed_size_field("reserve_price", &rp.to_fixed_bytes()) + }; + + if let Some(active) = self.active { + comm = comm.fixed_size_field("active", &[u8::from(active)]); + } + + if let Some(keys) = &self.signature_keys { + comm = comm.constant_str("signature_keys"); + for key in keys.iter() { + comm = comm.var_size_bytes(&key.to_bytes()); + } + } + + comm = comm.var_size_field("signature_key", &self.signature_key.to_bytes()); + + if let Some(text) = &self.text 
{ + comm = comm.var_size_field("text", text.as_bytes()); + } + + comm.finalize() + } +} diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index 34b2e6427..4596ae18c 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -1,5 +1,3 @@ -use std::ops::Add; - use anyhow::bail; use committable::{Commitment, Committable}; use ethers::types::Address; @@ -21,14 +19,20 @@ use jf_merkle_tree::{ }; use jf_vid::VidScheme; use num_traits::CheckedSub; +use serde::{Deserialize, Serialize}; +use std::ops::Add; use thiserror::Error; use vbs::version::Version; -use super::{fee_info::FeeError, header::ProposalValidationError}; +use super::{ + auction::ExecutionError, fee_info::FeeError, header::ProposalValidationError, + instance_state::NodeState, +}; use crate::{ - BlockMerkleTree, ChainConfig, Delta, FeeAccount, FeeAmount, FeeInfo, FeeMerkleTree, Header, - Leaf, NodeState, NsTableValidationError, PayloadByteLen, ResolvableChainConfig, SeqTypes, - UpgradeType, ValidatedState, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, + v0_3::{ChainConfig, FullNetworkTx, IterableFeeInfo, ResolvableChainConfig}, + BlockMerkleTree, Delta, FeeAccount, FeeAmount, FeeInfo, FeeMerkleTree, Header, Leaf, + NsTableValidationError, PayloadByteLen, SeqTypes, UpgradeType, BLOCK_MERKLE_TREE_HEIGHT, + FEE_MERKLE_TREE_HEIGHT, }; /// Possible builder validation failures @@ -53,6 +57,15 @@ pub enum StateValidationError { impl StateDelta for Delta {} +#[derive(Hash, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] +pub struct ValidatedState { + /// Frontier of Block Merkle Tree + pub block_merkle_tree: BlockMerkleTree, + /// Fee Merkle Tree + pub fee_merkle_tree: FeeMerkleTree, + pub chain_config: ResolvableChainConfig, +} + impl Default for ValidatedState { fn default() -> Self { let block_merkle_tree = BlockMerkleTree::from_elems( @@ -229,11 +242,18 @@ pub fn validate_proposal( }); } - if proposal.fee_info().amount() < expected_chain_config.base_fee * 
block_size { + // Validate that sum of fees is at least `base_fee * blocksize`. + // TODO this should be updated to `base_fee * bundle_size` when we have + // VID per bundle or namespace. + let Some(amount) = proposal.fee_info().amount() else { + return Err(ProposalValidationError::SomeFeeAmountOutOfRange); + }; + + if amount < expected_chain_config.base_fee * block_size { return Err(ProposalValidationError::InsufficientFee { max_block_size: expected_chain_config.max_block_size, base_fee: expected_chain_config.base_fee, - proposed_fee: proposal.fee_info().amount(), + proposed_fee: amount, }); } @@ -283,32 +303,51 @@ impl From for FeeError { fn charge_fee( state: &mut ValidatedState, delta: &mut Delta, - fee_info: FeeInfo, + fee_info: Vec, recipient: FeeAccount, ) -> Result<(), FeeError> { - state.charge_fee(fee_info, recipient)?; - delta.fees_delta.extend([fee_info.account, recipient]); + for fee_info in fee_info { + state.charge_fee(fee_info, recipient)?; + delta.fees_delta.extend([fee_info.account, recipient]); + } Ok(()) } -/// Validate builder account by verifying signature +/// Validate builder accounts by verifying signatures. All fees are +/// verified against signature by index. fn validate_builder_fee(proposed_header: &Header) -> Result<(), BuilderValidationError> { - // Beware of Malice! - let signature = proposed_header - .builder_signature() - .ok_or(BuilderValidationError::SignatureNotFound)?; - let fee_amount = proposed_header.fee_info().amount().as_u64().ok_or( - BuilderValidationError::FeeAmountOutOfRange(proposed_header.fee_info().amount()), - )?; - - // verify signature - if !proposed_header.fee_info().account.validate_fee_signature( - &signature, - fee_amount, - proposed_header.metadata(), - &proposed_header.payload_commitment(), - ) { - return Err(BuilderValidationError::InvalidBuilderSignature); + // TODO since we are iterating, should we include account/amount in errors? 
+ for (fee_info, signature) in proposed_header + .fee_info() + .iter() + .zip(proposed_header.builder_signature()) + { + // check that `amount` fits in a u64 + fee_info + .amount() + .as_u64() + .ok_or(BuilderValidationError::FeeAmountOutOfRange(fee_info.amount))?; + + // verify signature, accept any verification that succeeds + fee_info + .account() + .validate_sequencing_fee_signature_marketplace( + &signature, + fee_info.amount().as_u64().unwrap(), + ) + .then_some(()) + .or_else(|| { + fee_info + .account() + .validate_fee_signature( + &signature, + fee_info.amount().as_u64().unwrap(), + proposed_header.metadata(), + &proposed_header.payload_commitment(), + ) + .then_some(()) + }) + .ok_or(BuilderValidationError::InvalidBuilderSignature)?; } Ok(()) @@ -329,7 +368,7 @@ impl ValidatedState { validated_state.apply_upgrade(instance, version); let chain_config = validated_state - .get_chain_config(instance, proposed_header.chain_config()) + .get_chain_config(instance, &proposed_header.chain_config()) .await?; if Some(chain_config) != validated_state.chain_config.resolve() { @@ -348,12 +387,10 @@ impl ValidatedState { // fee and the recipient account which is receiving it, plus any counts receiving deposits // in this block. 
let missing_accounts = self.forgotten_accounts( - [ - proposed_header.fee_info().account, - chain_config.fee_recipient, - ] - .into_iter() - .chain(l1_deposits.iter().map(|fee_info| fee_info.account)), + [chain_config.fee_recipient] + .into_iter() + .chain(proposed_header.fee_info().accounts()) + .chain(l1_deposits.accounts()), ); let parent_height = parent_leaf.height(); @@ -473,6 +510,15 @@ impl ValidatedState { } } +fn _apply_full_transactions( + validated_state: &mut ValidatedState, + full_network_txs: Vec, +) -> Result<(), ExecutionError> { + full_network_txs + .iter() + .try_for_each(|tx| tx.execute(validated_state)) +} + pub async fn get_l1_deposits( instance: &NodeState, header: &Header, @@ -607,7 +653,7 @@ impl HotShotState for ValidatedState { Self { fee_merkle_tree, block_merkle_tree, - chain_config: *block_header.chain_config(), + chain_config: block_header.chain_config(), } } /// Construct a genesis validated state. @@ -704,12 +750,46 @@ impl MerklizedState for FeeMerkleTree { mod test { use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use ethers::types::U256; + use hotshot_types::traits::{block_contents::BlockHeader, signature_key::BuilderSignatureKey}; use hotshot_types::vid::vid_scheme; use jf_vid::VidScheme; use sequencer_utils::ser::FromStringOrInteger; use super::*; - use crate::{BlockSize, FeeAccountProof, FeeMerkleProof}; + use crate::{ + eth_signature_key::{BuilderSignature, EthKeyPair}, + v0_1, + v0_3::BidTx, + BlockSize, FeeAccountProof, FeeMerkleProof, + }; + + pub fn mock_full_network_txs(key: Option) -> Vec { + // if no key is supplied, use `test_key_pair`. Since default `BidTxBody` is + // signed with `test_key_pair`, it will verify successfully + let key = key.unwrap_or_else(FeeAccount::test_key_pair); + vec![FullNetworkTx::Bid(BidTx::mock(key))] + } + + #[test] + #[ignore] + // TODO Currently we have some mismatch causing tests using + // `Leaf::genesis` to generate a a Header to + // fail. 
`NodeState::mock` is setting version to `v1` resulting in + // an empty `bid_recipient` field on chain_config. We need a way to + // pass in desired version, or so some other change before this can be enabled. + fn test_apply_full_tx() { + let mut state = ValidatedState::default(); + let txs = mock_full_network_txs(None); + // Default key can be verified b/c it is the same that signs the mock tx + _apply_full_transactions(&mut state, txs).unwrap(); + + // Tx will be invalid if it is signed by a different key than + // set in `account` field. + let key = FeeAccount::generated_from_seed_indexed([1; 32], 0).1; + let invalid = mock_full_network_txs(Some(key)); + let err = _apply_full_transactions(&mut state, invalid).unwrap_err(); + assert_eq!(ExecutionError::InvalidSignature, err); + } #[test] fn test_fee_proofs() { @@ -763,8 +843,10 @@ mod test { let instance = NodeState::mock().with_chain_config(ChainConfig { max_block_size: (MAX_BLOCK_SIZE as u64).into(), base_fee: 0.into(), - ..Default::default() + ..state.chain_config.resolve().unwrap() }); + // TODO this test will fail if we add `Some(bid_recipient)` (v3) to chain_config + // b/c version in `Leaf::genesis` is set to 1 let parent = Leaf::genesis(&instance.genesis_state, &instance).await; let header = parent.block_header(); @@ -798,8 +880,10 @@ mod test { let instance = NodeState::mock().with_chain_config(ChainConfig { base_fee: 1000.into(), // High base fee max_block_size: max_block_size.into(), - ..Default::default() + ..state.chain_config.resolve().unwrap() }); + // TODO this test will fail if we add `Some(bid_recipient)` (v3) to chain_config + // b/c version in `Leaf::genesis` is set to 1 let parent = Leaf::genesis(&instance.genesis_state, &instance).await; let header = parent.block_header(); @@ -812,7 +896,7 @@ mod test { ProposalValidationError::InsufficientFee { max_block_size: instance.chain_config.max_block_size, base_fee: instance.chain_config.base_fee, - proposed_fee: header.fee_info().amount() + 
proposed_fee: header.fee_info().amount().unwrap() }, err ); @@ -921,4 +1005,80 @@ mod test { bincode::serialize(&amt).unwrap(), ); } + + #[async_std::test] + async fn test_validate_builder_fee() { + setup_logging(); + setup_backtrace(); + + let max_block_size = 10; + + let validated_state = ValidatedState::default(); + let instance_state = NodeState::mock().with_chain_config(ChainConfig { + base_fee: 1000.into(), // High base fee + max_block_size: max_block_size.into(), + ..validated_state.chain_config.resolve().unwrap() + }); + + let parent = Leaf::genesis(&instance_state.genesis_state, &instance_state).await; + let header = parent.block_header().clone(); + let metadata = parent.block_header().metadata(); + let vid_commitment = parent.payload_commitment(); + + dbg!(header.version()); + + let key_pair = EthKeyPair::random(); + let account = key_pair.fee_account(); + + let data = header.fee_info()[0].amount().as_u64().unwrap(); + let sig = FeeAccount::sign_builder_message(&key_pair, &data.to_be_bytes()).unwrap(); + + // ensure the signature is indeed valid + account + .validate_builder_signature(&sig, &data.to_be_bytes()) + .then_some(()) + .unwrap(); + + // test v1 sig + let sig = FeeAccount::sign_fee(&key_pair, data, metadata, &vid_commitment).unwrap(); + + let header = match header { + Header::V1(header) => Header::V1(v0_1::Header { + builder_signature: Some(sig), + fee_info: FeeInfo::new(account, data), + ..header + }), + _ => unimplemented!(), + }; + + validate_builder_fee(&header).unwrap(); + + // test v3 sig + let sig = FeeAccount::sign_sequencing_fee_marketplace(&key_pair, data).unwrap(); + // test dedicated marketplace validation function + account + .validate_sequencing_fee_signature_marketplace(&sig, data) + .then_some(()) + .unwrap(); + + let header = match header { + Header::V1(header) => Header::V1(v0_1::Header { + builder_signature: Some(sig), + fee_info: FeeInfo::new(account, data), + ..header + }), + _ => unimplemented!(), + }; + + let sig: Vec = 
header.builder_signature(); + let fee = header.fee_info()[0].amount().as_u64().unwrap(); + + // assert expectations + account + .validate_sequencing_fee_signature_marketplace(&sig[0], fee) + .then_some(()) + .unwrap(); + + validate_builder_fee(&header).unwrap(); + } } diff --git a/types/src/v0/mod.rs b/types/src/v0/mod.rs index e683ac714..aaf37614c 100644 --- a/types/src/v0/mod.rs +++ b/types/src/v0/mod.rs @@ -69,7 +69,6 @@ reexport_unchanged_types!( BlockMerkleCommitment, BlockMerkleTree, BuilderSignature, - ChainConfig, ChainId, Delta, FeeAccount, @@ -85,7 +84,6 @@ reexport_unchanged_types!( L1Client, L1Snapshot, NamespaceId, - NodeState, NsIndex, NsIter, NsPayload, @@ -103,7 +101,6 @@ reexport_unchanged_types!( NumTxsUnchecked, Payload, PayloadByteLen, - ResolvableChainConfig, Transaction, TxIndex, TxIter, @@ -117,7 +114,6 @@ reexport_unchanged_types!( UpgradeMode, TimeBasedUpgrade, ViewBasedUpgrade, - ValidatedState, BlockSize, ); @@ -151,6 +147,7 @@ pub type PrivKey = ::PrivateKey; pub type NetworkConfig = hotshot_orchestrator::config::NetworkConfig; +pub use self::impls::{NodeState, ValidatedState}; pub use crate::v0_1::{ BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index ee976453a..de07e77c0 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -22,10 +22,12 @@ use hotshot_types::{ use serde::{de::DeserializeOwned, Serialize}; use crate::{ - AccountQueryData, BackoffParams, BlockMerkleTree, ChainConfig, Event, FeeAccount, - FeeMerkleCommitment, Leaf, NetworkConfig, NodeState, SeqTypes, ValidatedState, + v0::impls::ValidatedState, v0_3::ChainConfig, AccountQueryData, BackoffParams, BlockMerkleTree, + Event, FeeAccount, FeeMerkleCommitment, Leaf, NetworkConfig, SeqTypes, }; +use super::impls::NodeState; + #[async_trait] pub trait StateCatchup: Send + Sync + std::fmt::Debug { /// Try to fetch 
the given account state, failing without retrying if unable. diff --git a/types/src/v0/v0_1/chain_config.rs b/types/src/v0/v0_1/chain_config.rs index e74e36fda..d6f5f8bc2 100644 --- a/types/src/v0/v0_1/chain_config.rs +++ b/types/src/v0/v0_1/chain_config.rs @@ -1,4 +1,4 @@ -use committable::Commitment; +use committable::{Commitment, Committable}; use derive_more::{Deref, Display, From, Into}; use ethers::types::{Address, U256}; use itertools::Either; @@ -46,3 +46,66 @@ pub struct ChainConfig { pub struct ResolvableChainConfig { pub(crate) chain_config: Either>, } + +impl Committable for ChainConfig { + fn tag() -> String { + "CHAIN_CONFIG".to_string() + } + + fn commit(&self) -> Commitment { + let comm = committable::RawCommitmentBuilder::new(&Self::tag()) + .fixed_size_field("chain_id", &self.chain_id.to_fixed_bytes()) + .u64_field("max_block_size", *self.max_block_size) + .fixed_size_field("base_fee", &self.base_fee.to_fixed_bytes()) + .fixed_size_field("fee_recipient", &self.fee_recipient.to_fixed_bytes()); + let comm = if let Some(addr) = self.fee_contract { + comm.u64_field("fee_contract", 1).fixed_size_bytes(&addr.0) + } else { + comm.u64_field("fee_contract", 0) + }; + comm.finalize() + } +} + +impl ResolvableChainConfig { + pub fn commit(&self) -> Commitment { + match self.chain_config { + Either::Left(config) => config.commit(), + Either::Right(commitment) => commitment, + } + } + pub fn resolve(self) -> Option { + match self.chain_config { + Either::Left(config) => Some(config), + Either::Right(_) => None, + } + } +} + +impl From> for ResolvableChainConfig { + fn from(value: Commitment) -> Self { + Self { + chain_config: Either::Right(value), + } + } +} + +impl From for ResolvableChainConfig { + fn from(value: ChainConfig) -> Self { + Self { + chain_config: Either::Left(value), + } + } +} + +impl Default for ChainConfig { + fn default() -> Self { + Self { + chain_id: U256::from(35353).into(), // arbitrarily chosen chain ID + max_block_size: 30720.into(), 
+ base_fee: 0.into(), + fee_contract: None, + fee_recipient: Default::default(), + } + } +} diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs index 6007bed21..2d2333fb2 100644 --- a/types/src/v0/v0_1/instance_state.rs +++ b/types/src/v0/v0_1/instance_state.rs @@ -1,16 +1,7 @@ -use std::collections::BTreeMap; - -use std::sync::Arc; - use serde::{Deserialize, Serialize}; use std::fmt::Debug; -use crate::{ - v0::traits::StateCatchup, ChainConfig, GenesisHeader, L1BlockInfo, Timestamp, ValidatedState, -}; -use vbs::version::Version; - -use super::l1::L1Client; +use crate::{v0_3::ChainConfig, Timestamp}; /// Represents the specific type of upgrade. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] @@ -66,34 +57,3 @@ pub struct Upgrade { /// The type of the upgrade. pub upgrade_type: UpgradeType, } - -/// Represents the immutable state of a node. -/// -/// For mutable state, use `ValidatedState`. -#[derive(Debug, Clone)] -pub struct NodeState { - pub node_id: u64, - pub chain_config: ChainConfig, - pub l1_client: L1Client, - pub peers: Arc, - pub genesis_header: GenesisHeader, - pub genesis_state: ValidatedState, - pub l1_genesis: Option, - - /// Map containing all planned and executed upgrades. - /// - /// Currently, only one upgrade can be executed at a time. - /// For multiple upgrades, the node needs to be restarted after each upgrade. - /// - /// This field serves as a record for planned and past upgrades, - /// listed in the genesis TOML file. It will be very useful if multiple upgrades - /// are supported in the future. - pub upgrades: BTreeMap, - /// Current version of the sequencer. - /// - /// This version is checked to determine if an upgrade is planned, - /// and which version variant for versioned types - /// to use in functions such as genesis. 
- /// (example: genesis returns V2 Header if version is 0.2) - pub current_version: Version, -} diff --git a/types/src/v0/v0_1/state.rs b/types/src/v0/v0_1/state.rs index c9906f7be..5dcdc0444 100644 --- a/types/src/v0/v0_1/state.rs +++ b/types/src/v0/v0_1/state.rs @@ -1,7 +1,5 @@ -use crate::{Header, ResolvableChainConfig}; - use super::{FeeAccount, FeeAmount}; - +use crate::Header; use committable::Commitment; use jf_merkle_tree::{ prelude::{LightWeightSHA3MerkleTree, Sha3Digest, Sha3Node}, @@ -9,6 +7,7 @@ use jf_merkle_tree::{ MerkleTreeScheme, }; use serde::{Deserialize, Serialize}; +use std::collections::HashSet; // The block merkle tree accumulates header commitments. However, since the underlying // representation of the commitment type remains the same even while the header itself changes, @@ -19,22 +18,9 @@ pub type BlockMerkleCommitment = ::Commitme pub type FeeMerkleTree = UniversalMerkleTree; pub type FeeMerkleCommitment = ::Commitment; -use core::fmt::Debug; - -use std::collections::HashSet; - pub const BLOCK_MERKLE_TREE_HEIGHT: usize = 32; pub const FEE_MERKLE_TREE_HEIGHT: usize = 20; -#[derive(Hash, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] -pub struct ValidatedState { - /// Frontier of Block Merkle Tree - pub block_merkle_tree: BlockMerkleTree, - /// Fee Merkle Tree - pub fee_merkle_tree: FeeMerkleTree, - pub chain_config: ResolvableChainConfig, -} - #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] pub struct Delta { pub fees_delta: HashSet, diff --git a/types/src/v0/v0_2/mod.rs b/types/src/v0/v0_2/mod.rs index f71f78fb9..56ff34d45 100644 --- a/types/src/v0/v0_2/mod.rs +++ b/types/src/v0/v0_2/mod.rs @@ -5,14 +5,13 @@ pub use super::v0_1::{ AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, BuilderSignature, ChainConfig, ChainId, Delta, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo, FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, Header, Index, Iter, L1BlockInfo, L1Client, - L1Snapshot, 
NamespaceId, NodeState, NsIndex, NsIter, NsPayload, NsPayloadBuilder, - NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, - NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, - ResolvableChainConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, - TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, - UpgradeType, ValidatedState, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, - FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, - TX_OFFSET_BYTE_LEN, + L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, + NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError, + NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, ResolvableChainConfig, + TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof, + TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, ViewBasedUpgrade, + BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, + NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, }; pub const VERSION: Version = Version { major: 0, minor: 2 }; diff --git a/types/src/v0/v0_3/auction.rs b/types/src/v0/v0_3/auction.rs index a44e36800..0ba19bc63 100644 --- a/types/src/v0/v0_3/auction.rs +++ b/types/src/v0/v0_3/auction.rs @@ -42,7 +42,7 @@ pub struct BidTxBody { /// The results of an Auction #[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize, Hash)] -pub struct AuctionResults { +pub struct SolverAuctionResults { /// view number the results are for pub(crate) view_number: ViewNumber, /// A list of the bid txs that won diff --git a/types/src/v0/v0_3/chain_config.rs b/types/src/v0/v0_3/chain_config.rs new file mode 100644 index 000000000..7fcfd7253 --- /dev/null +++ b/types/src/v0/v0_3/chain_config.rs @@ -0,0 +1,175 @@ +use crate::{v0_1, 
BlockSize, ChainId, FeeAccount, FeeAmount}; +use committable::{Commitment, Committable}; +use ethers::types::{Address, U256}; +use itertools::Either; +use serde::{Deserialize, Serialize}; +use std::str::FromStr; + +/// Global variables for an Espresso blockchain. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct ChainConfig { + /// Espresso chain ID + pub chain_id: ChainId, + + /// Maximum size in bytes of a block + pub max_block_size: BlockSize, + + /// Minimum fee in WEI per byte of payload + pub base_fee: FeeAmount, + + /// Fee contract address on L1. + /// + /// This is optional so that fees can easily be toggled on/off, with no need to deploy a + /// contract when they are off. In a future release, after fees are switched on and thoroughly + /// tested, this may be made mandatory. + pub fee_contract: Option
, + + /// Account that receives sequencing fees. + /// + /// This account in the Espresso fee ledger will always receive every fee paid in Espresso, + /// regardless of whether or not there is a `fee_contract` deployed. Once deployed, the fee + /// contract can decide what to do with tokens locked in this account in Espresso. + pub fee_recipient: FeeAccount, + + /// Account that receives sequencing bids. + pub bid_recipient: Option, + } + + #[derive(Clone, Debug, Copy, PartialEq, Deserialize, Serialize, Eq, Hash)] + /// A commitment to a ChainConfig or a full ChainConfig. + pub struct ResolvableChainConfig { + pub(crate) chain_config: Either>, + } + + impl Committable for ChainConfig { + fn tag() -> String { + "CHAIN_CONFIG".to_string() + } + + fn commit(&self) -> Commitment { + let comm = committable::RawCommitmentBuilder::new(&Self::tag()) + .fixed_size_field("chain_id", &self.chain_id.to_fixed_bytes()) + .u64_field("max_block_size", *self.max_block_size) + .fixed_size_field("base_fee", &self.base_fee.to_fixed_bytes()) + .fixed_size_field("fee_recipient", &self.fee_recipient.to_fixed_bytes()); + let comm = if let Some(addr) = self.fee_contract { + comm.u64_field("fee_contract", 1).fixed_size_bytes(&addr.0) + } else { + comm.u64_field("fee_contract", 0) + }; + + // With `ChainConfig` upgrades we want commitments w/out + // fields added >= v0_3 to have the same commitment as <= v0_3 + // commitment. Therefore `None` values are simply ignored. 
+ let comm = if let Some(bid_recipient) = self.bid_recipient { + comm.fixed_size_field("bid_recipient", &bid_recipient.to_fixed_bytes()) + } else { + comm + }; + + comm.finalize() + } +} + +impl ResolvableChainConfig { + pub fn commit(&self) -> Commitment { + match self.chain_config { + Either::Left(config) => config.commit(), + Either::Right(commitment) => commitment, + } + } + pub fn resolve(self) -> Option { + match self.chain_config { + Either::Left(config) => Some(config), + Either::Right(_) => None, + } + } +} + +impl From> for ResolvableChainConfig { + fn from(value: Commitment) -> Self { + Self { + chain_config: Either::Right(value), + } + } +} + +impl From for ResolvableChainConfig { + fn from(value: ChainConfig) -> Self { + Self { + chain_config: Either::Left(value), + } + } +} + +impl From<&v0_1::ResolvableChainConfig> for ResolvableChainConfig { + fn from( + &v0_1::ResolvableChainConfig { chain_config }: &v0_1::ResolvableChainConfig, + ) -> ResolvableChainConfig { + match chain_config { + Either::Left(chain_config) => ResolvableChainConfig { + chain_config: Either::Left(ChainConfig::from(chain_config)), + }, + // TODO does this work? is there a better way? + Either::Right(c) => ResolvableChainConfig { + chain_config: Either::Right(Commitment::from_str(&c.to_string()).unwrap()), + }, + } + } +} + +impl From for ChainConfig { + fn from(chain_config: v0_1::ChainConfig) -> ChainConfig { + let v0_1::ChainConfig { + chain_id, + max_block_size, + base_fee, + fee_contract, + fee_recipient, + .. + } = chain_config; + + ChainConfig { + chain_id, + max_block_size, + base_fee, + fee_contract, + fee_recipient, + bid_recipient: None, + } + } +} + +impl From for v0_1::ChainConfig { + fn from(chain_config: ChainConfig) -> v0_1::ChainConfig { + let ChainConfig { + chain_id, + max_block_size, + base_fee, + fee_contract, + fee_recipient, + .. 
+ } = chain_config; + + v0_1::ChainConfig { + chain_id, + max_block_size, + base_fee, + fee_contract, + fee_recipient, + } + } +} + +impl Default for ChainConfig { + fn default() -> Self { + Self { + chain_id: U256::from(35353).into(), // arbitrarily chosen chain ID + max_block_size: 30720.into(), + base_fee: 0.into(), + fee_contract: None, + fee_recipient: Default::default(), + bid_recipient: None, + } + } +} diff --git a/types/src/v0/v0_3/fee_info.rs b/types/src/v0/v0_3/fee_info.rs new file mode 100644 index 000000000..0b5754619 --- /dev/null +++ b/types/src/v0/v0_3/fee_info.rs @@ -0,0 +1,7 @@ +use crate::{FeeAccount, FeeAmount}; + +/// Methods for use w/ Vec +pub trait IterableFeeInfo { + fn amount(&self) -> Option; + fn accounts(&self) -> Vec; +} diff --git a/types/src/v0/v0_3/header.rs b/types/src/v0/v0_3/header.rs index 29ca7d26d..bcf70e080 100644 --- a/types/src/v0/v0_3/header.rs +++ b/types/src/v0/v0_3/header.rs @@ -1,17 +1,17 @@ -use crate::NsTable; - use super::{ BlockMerkleCommitment, BuilderSignature, FeeInfo, FeeMerkleCommitment, L1BlockInfo, - ResolvableChainConfig, + ResolvableChainConfig, SolverAuctionResults, }; +use crate::NsTable; use ark_serialize::CanonicalSerialize; use committable::{Commitment, Committable, RawCommitmentBuilder}; use hotshot_types::{utils::BuilderCommitment, vid::VidCommitment}; use serde::{Deserialize, Serialize}; -// TODO : marketplace header #[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] +/// A header is like a [`Block`] with the body replaced by a digest. pub struct Header { + /// A commitment to a ChainConfig or a full ChainConfig. 
pub(crate) chain_config: ResolvableChainConfig, pub(crate) height: u64, pub(crate) timestamp: u64, @@ -22,8 +22,9 @@ pub struct Header { pub(crate) ns_table: NsTable, pub(crate) block_merkle_tree_root: BlockMerkleCommitment, pub(crate) fee_merkle_tree_root: FeeMerkleCommitment, - pub(crate) fee_info: FeeInfo, - pub(crate) builder_signature: Option, + pub(crate) fee_info: Vec, + pub(crate) builder_signature: Vec, + pub(crate) auction_results: SolverAuctionResults, } impl Committable for Header { @@ -50,11 +51,15 @@ impl Committable for Header { .field("ns_table", self.ns_table.commit()) .var_size_field("block_merkle_tree_root", &bmt_bytes) .var_size_field("fee_merkle_tree_root", &fmt_bytes) - .field("fee_info", self.fee_info.commit()) + .var_size_field("fee_info", &bincode::serialize(&self.fee_info).unwrap()) + .var_size_field( + "auction_results", + &bincode::serialize(&self.auction_results).unwrap(), + ) .finalize() } fn tag() -> String { - "BLOCK".into() + crate::v0_1::Header::tag() } } diff --git a/types/src/v0/v0_3/mod.rs b/types/src/v0/v0_3/mod.rs index f8a6ef1b6..7cb1162c8 100644 --- a/types/src/v0/v0_3/mod.rs +++ b/types/src/v0/v0_3/mod.rs @@ -2,23 +2,27 @@ use vbs::version::Version; // Re-export types which haven't changed since the last minor version. 
pub use super::v0_1::{ - AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, BuilderSignature, - ChainConfig, ChainId, Delta, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo, - FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, - L1Snapshot, NamespaceId, NodeState, NsIndex, NsIter, NsPayload, NsPayloadBuilder, - NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, - NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, - ResolvableChainConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, + AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, BuilderSignature, ChainId, + Delta, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo, FeeMerkleCommitment, FeeMerkleProof, + FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, L1Snapshot, NamespaceId, NsIndex, NsIter, + NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, + NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, + Payload, PayloadByteLen, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, - UpgradeType, ValidatedState, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, - FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, - TX_OFFSET_BYTE_LEN, + UpgradeType, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, + NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, }; pub const VERSION: Version = Version { major: 0, minor: 3 }; mod auction; +mod chain_config; +mod fee_info; mod header; +mod solver; -pub use auction::{AuctionResults, BidTx, BidTxBody, FullNetworkTx}; +pub use auction::{BidTx, BidTxBody, FullNetworkTx, SolverAuctionResults}; +pub use chain_config::*; +pub use fee_info::IterableFeeInfo; pub use 
header::Header; +pub use solver::*; diff --git a/types/src/v0/v0_3/solver.rs b/types/src/v0/v0_3/solver.rs new file mode 100644 index 000000000..316efea04 --- /dev/null +++ b/types/src/v0/v0_3/solver.rs @@ -0,0 +1,53 @@ +use crate::{FeeAmount, NamespaceId, SeqTypes}; +use hotshot::types::SignatureKey; +use hotshot_types::traits::node_implementation::NodeType; +use serde::{Deserialize, Serialize}; +use tide_disco::Url; + +#[derive(PartialEq, Serialize, Deserialize, Debug, Clone)] +pub struct RollupRegistration { + pub body: RollupRegistrationBody, + // signature over the above data (must be from a key in the 'signature_keys` list) + pub signature: + <::SignatureKey as SignatureKey>::PureAssembledSignatureType, +} + +#[derive(PartialEq, Serialize, Deserialize, Debug, Clone)] +pub struct RollupRegistrationBody { + pub namespace_id: NamespaceId, + pub reserve_url: Url, + // Denominated in Wei + pub reserve_price: FeeAmount, + // whether this registration is active in the marketplace + pub active: bool, + // a list of keys authorized to update the registration information + pub signature_keys: Vec<::SignatureKey>, + // The signature key used to sign this registration body + pub signature_key: ::SignatureKey, + // Optional field for human readable information + pub text: String, +} + +#[derive(PartialEq, Serialize, Deserialize, Debug, Clone)] +pub struct RollupUpdate { + pub body: RollupUpdatebody, + // signature over the above data (must be from a key in the 'signature_keys` list) + pub signature: + <::SignatureKey as SignatureKey>::PureAssembledSignatureType, +} + +#[derive(PartialEq, Serialize, Deserialize, Debug, Clone)] +pub struct RollupUpdatebody { + pub namespace_id: NamespaceId, + // Denominated in Wei + pub reserve_url: Option, + pub reserve_price: Option, + // whether this registration is active in the marketplace + pub active: Option, + // a list of keys authorized to update the registration information + pub signature_keys: Option::SignatureKey>>, + // The 
signature key used to sign this update body + pub signature_key: ::SignatureKey, + // Optional field for human readable information + pub text: Option, +} diff --git a/utils/Cargo.toml b/utils/Cargo.toml index bfd401dc0..07eb5aa74 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -8,6 +8,7 @@ edition = "2021" [dependencies] anyhow = { workspace = true } ark-serialize = { workspace = true, features = ["derive"] } +async-compatibility-layer = { workspace = true } async-std = { workspace = true } clap = { workspace = true } committable = "0.2" @@ -16,6 +17,7 @@ derive_more = { workspace = true } ethers = { workspace = true } futures = { workspace = true } hotshot-contract-adapter = { workspace = true } +log-panics = { workspace = true } portpicker = { workspace = true } serde = { workspace = true } serde_json = "^1.0.113" diff --git a/utils/src/lib.rs b/utils/src/lib.rs index f1e038830..d4fcea03d 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -21,6 +21,7 @@ use tempfile::TempDir; use url::Url; pub mod deployer; +pub mod logging; pub mod ser; pub mod test_utils; diff --git a/utils/src/logging.rs b/utils/src/logging.rs new file mode 100644 index 000000000..665731b6d --- /dev/null +++ b/utils/src/logging.rs @@ -0,0 +1,47 @@ +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use clap::{Parser, ValueEnum}; +use log_panics::BacktraceMode; + +/// Controls how backtraces are logged on panic. +/// +/// The values here match the possible values of `RUST_LOG_FORMAT`, and their corresponding behavior +/// on backtrace logging is: +/// * `full`: print a prettified dump of the stack trace and span trace to stdout, optimized for +/// human readability rather than machine parsing +/// * `compact`: output the default panic message, with backtraces controlled by `RUST_BACKTRACE` +/// * `json`: output the panic message and stack trace as a tracing event. 
This in turn works with +/// the behavior of the tracing subscriber with `RUST_LOG_FORMAT=json` to output the event in a +/// machine-parseable, JSON format. +#[derive(Clone, Copy, Debug, Default, ValueEnum)] +enum BacktraceLoggingMode { + #[default] + Full, + Compact, + Json, +} + +/// Logging configuration. +#[derive(Clone, Debug, Default, Parser)] +pub struct Config { + #[clap(long, env = "RUST_LOG_FORMAT")] + backtrace_mode: Option, +} + +impl Config { + /// Get the logging configuration from the environment. + pub fn from_env() -> Self { + Self::parse_from(std::iter::empty::()) + } + + /// Initialize logging and panic handlers based on this configuration. + pub fn init(&self) { + setup_logging(); + match self.backtrace_mode.unwrap_or_default() { + BacktraceLoggingMode::Full => setup_backtrace(), + BacktraceLoggingMode::Compact => {} + BacktraceLoggingMode::Json => log_panics::Config::new() + .backtrace_mode(BacktraceMode::Resolved) + .install_panic_hook(), + } + } +} diff --git a/utils/src/test_utils.rs b/utils/src/test_utils.rs index 8b0033a97..a41a2ce97 100644 --- a/utils/src/test_utils.rs +++ b/utils/src/test_utils.rs @@ -90,3 +90,7 @@ impl TestL1System { }) } } + +pub fn setup_test() { + super::logging::Config::from_env().init(); +}