diff --git a/.dockerignore b/.dockerignore
index c32286be6a01..39efdabca19a 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -39,7 +39,7 @@ contracts/.git
 !etc/multivm_bootloaders
 !cargo
 !bellman-cuda
-!prover/crates/bin/vk_setup_data_generator_server_fri/data/
+!prover/data/
 !.github/release-please/manifest.json
 !etc/env/file_based
diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json
index 232939b78334..811c773b6f54 100644
--- a/.github/release-please/manifest.json
+++ b/.github/release-please/manifest.json
@@ -1,5 +1,5 @@
 {
-  "core": "24.21.0",
-  "prover": "16.4.0",
+  "core": "24.24.0",
+  "prover": "16.5.0",
   "zk_toolbox": "0.1.2"
 }
diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml
index 2b24801d065f..db7c4ba387f4 100644
--- a/.github/workflows/build-contract-verifier-template.yml
+++ b/.github/workflows/build-contract-verifier-template.yml
@@ -28,7 +28,7 @@ jobs:
     name: Build and Push Docker Images
     env:
       IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}
-    runs-on: ${{ fromJSON('["matterlabs-ci-runner", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }}
+    runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }}
     strategy:
       matrix:
         components:
@@ -149,7 +149,8 @@ jobs:
 
   create_manifest:
     name: Create release manifest
-    runs-on: matterlabs-ci-runner
+    # TODO: After migration, switch to CI
+    runs-on: matterlabs-default-infra-runners
     needs: build-images
     if: ${{ inputs.action == 'push' }}
     strategy:
diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml
index 4ead6cb746dd..7e5dcc10a939 100644
--- a/.github/workflows/build-core-template.yml
+++ b/.github/workflows/build-core-template.yml
@@ -33,7 +33,7 @@ jobs:
     name: Build and Push Docker Images
     env:
       IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}${{ (inputs.en_alpha_release && matrix.components == 'external-node') && '-alpha' || '' }}
-    runs-on: ${{ fromJSON('["matterlabs-ci-runner", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }}
+    runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }}
     strategy:
       matrix:
         components:
@@ -158,7 +158,8 @@ jobs:
 
   create_manifest:
     name: Create release manifest
-    runs-on: matterlabs-ci-runner
+    # TODO: After migration, switch to CI
+    runs-on: matterlabs-default-infra-runners
     needs: build-images
     if: ${{ inputs.action == 'push' }}
     strategy:
diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml
index cd222a6e43bb..29d26a713d89 100644
--- a/.github/workflows/build-docker-from-tag.yml
+++ b/.github/workflows/build-docker-from-tag.yml
@@ -23,7 +23,7 @@ concurrency: docker-build
 jobs:
   setup:
     name: Setup
-    runs-on: [ubuntu-latest]
+    runs-on: [ ubuntu-latest ]
     outputs:
       image_tag_suffix: ${{ steps.set.outputs.image_tag_suffix }}
       prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }}
@@ -48,7 +48,7 @@ jobs:
 
   build-push-core-images:
     name: Build and push image
-    needs: [setup]
+    needs: [ setup ]
     uses: ./.github/workflows/build-core-template.yml
     if: contains(github.ref_name, 'core')
     secrets:
@@ -60,7 +60,7 @@ jobs:
 
   build-push-tee-prover-images:
     name: Build and push images
-    needs: [setup]
+    needs: [ setup ]
    uses: ./.github/workflows/build-tee-prover-template.yml
    if: contains(github.ref_name, 'core')
    secrets:
@@ -72,7 +72,7 @@ jobs:
 
   build-push-contract-verifier:
     name: Build and push image
-    needs: [setup]
+    needs: [ setup ]
     uses: ./.github/workflows/build-contract-verifier-template.yml
     if: contains(github.ref_name, 'contract_verifier')
     secrets:
@@ -83,26 +83,26 @@ jobs:
 
   build-push-prover-images:
     name: Build and push image
-    needs: [setup]
+    needs: [ setup ]
     uses: ./.github/workflows/build-prover-template.yml
     if: contains(github.ref_name, 'prover')
     with:
       image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}
       ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }}
-      CUDA_ARCH: "60;70;75;89"
+      CUDA_ARCH: "60;70;75;80;89"
     secrets:
       DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
       DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
 
   build-push-witness-generator-image-avx512:
     name: Build and push image
-    needs: [setup]
+    needs: [ setup ]
     uses: ./.github/workflows/build-witness-generator-template.yml
     if: contains(github.ref_name, 'prover')
     with:
       image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512
       ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }}
-      CUDA_ARCH: "60;70;75;89"
+      CUDA_ARCH: "60;70;75;80;89"
       WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl"
     secrets:
       DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -110,7 +110,7 @@ jobs:
 
   build-gar-prover-fri-gpu:
     name: Build GAR prover FRI GPU
-    needs: [setup, build-push-prover-images]
+    needs: [ setup, build-push-prover-images ]
     uses: ./.github/workflows/build-prover-fri-gpu-gar.yml
     if: contains(github.ref_name, 'prover')
     with:
diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml
index e5e8fb69fb1d..7f36f28f2864 100644
--- a/.github/workflows/build-local-node-docker.yml
+++ b/.github/workflows/build-local-node-docker.yml
@@ -16,7 +16,7 @@ on:
 jobs:
   build-images:
     name: Local Node - Build and Push Docker Image
-    runs-on: [matterlabs-ci-runner]
+    runs-on: [matterlabs-ci-runner-high-performance]
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
         with:
diff --git a/.github/workflows/build-prover-fri-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar.yml
index 9740cafd9678..c0ea060b07e9 100644
--- a/.github/workflows/build-prover-fri-gpu-gar.yml
+++ b/.github/workflows/build-prover-fri-gpu-gar.yml
@@ -19,7 +19,7 @@ on:
 jobs:
   build-gar-prover-fri-gpu:
     name: Build prover FRI GPU GAR
-    runs-on: [matterlabs-ci-runner]
+    runs-on: [matterlabs-ci-runner-high-performance]
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
         with:
@@ -34,13 +34,13 @@ jobs:
           gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev
 
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
 
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
 
       - name: Build and push
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
         with:
           context: docker/prover-gpu-fri-gar
           build-args: |
diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml
index 7591c45b49e4..4f3cad7f1d02 100644
--- a/.github/workflows/build-prover-template.yml
+++ b/.github/workflows/build-prover-template.yml
@@ -45,7 +45,7 @@ jobs:
       RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml"
       ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }}
       CUDA_ARCH: ${{ inputs.CUDA_ARCH }}
-    runs-on: [ matterlabs-ci-runner ]
+    runs-on: [ matterlabs-ci-runner-high-performance ]
     strategy:
       matrix:
         component:
@@ -171,7 +171,8 @@ jobs:
     env:
       IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}
       PROTOCOL_VERSION: ${{ needs.build-images.outputs.protocol_version }}
-    runs-on: matterlabs-ci-runner
+    # TODO: After migration, switch to CI
+    runs-on: matterlabs-default-infra-runners
     if: ${{ inputs.action == 'push' }}
     strategy:
       matrix:
@@ -179,7 +180,7 @@ jobs:
         - witness-vector-generator
     steps:
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
 
       - name: Login to us-central1 GAR
         run: |
diff --git a/.github/workflows/build-tee-prover-template.yml b/.github/workflows/build-tee-prover-template.yml
index e05f368aa8b9..0e5b80d2e3a2 100644
--- a/.github/workflows/build-tee-prover-template.yml
+++ b/.github/workflows/build-tee-prover-template.yml
@@ -26,17 +26,17 @@ jobs:
     name: Build and Push Docker Images
     env:
       IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}
-    runs-on: [matterlabs-ci-runner]
+    runs-on: [matterlabs-ci-runner-high-performance]
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
         if: ${{ github.event_name == 'workflow_dispatch' }}
         with:
           ref: ${{ github.event.inputs.target_branch }}
 
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
         if: ${{ github.event_name != 'workflow_dispatch' }}
 
-      - uses: cachix/install-nix-action@v27
+      - uses: cachix/install-nix-action@ba0dd844c9180cbf77aa72a116d6fbc515d0e87b # v27
         with:
           extra_nix_config: |
             access-tokens = github.com=${{ github.token }}
@@ -45,7 +45,7 @@ jobs:
             sandbox = true
 
       - name: Setup Attic cache
-        uses: ryanccn/attic-action@v0
+        uses: ryanccn/attic-action@618a980988d704a7709daeea88526146acd1d45f # v0.2.1
         with:
           endpoint: https://attic.teepot.org/
           cache: tee-pot
diff --git a/.github/workflows/cargo-license.yaml b/.github/workflows/cargo-license.yaml
index db3cd4ddd895..72eb8d0d865b 100644
--- a/.github/workflows/cargo-license.yaml
+++ b/.github/workflows/cargo-license.yaml
@@ -4,5 +4,8 @@ jobs:
   cargo-deny:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
-      - uses: EmbarkStudios/cargo-deny-action@68cd9c5e3e16328a430a37c743167572e3243e7e
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+      - uses: EmbarkStudios/cargo-deny-action@8371184bd11e21dcf8ac82ebf8c9c9f74ebf7268 # v2.0.1
+        with:
+          command: check
+          command-arguments: "--hide-inclusion-graph"
diff --git a/.github/workflows/check-pr-title.yml b/.github/workflows/check-pr-title.yml
index 02c9b48600a6..bcac8df791fe 100644
--- a/.github/workflows/check-pr-title.yml
+++ b/.github/workflows/check-pr-title.yml
@@ -1,6 +1,6 @@
 name: Check PR title
 on:
-  pull_request_target:
+  pull_request:
     types:
       - opened
       - reopened
@@ -12,7 +12,38 @@ jobs:
     runs-on: ubuntu-latest
     permissions:
       statuses: write
+      pull-requests: write
     steps:
-      - uses: aslafy-z/conventional-pr-title-action@v3
+      - uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 # v5
+        id: lint_pr_title
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      - uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31 # v2
+        # When the previous step fails, the workflow would stop. By adding this
+        # condition you can continue the execution with the populated error message.
+        if: always() && (steps.lint_pr_title.outputs.error_message != null)
+        with:
+          header: pr-title-lint-error
+          message: |
+            Hey there! 👋🏼
+
+            We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted.
+            Examples of valid PR titles:
+
+            - feat(eth_sender): Support new transaction type
+            - fix(state_keeper): Correctly handle edge case
+            - ci: Add new workflow for linting
+
+            Details:
+
+            ```
+            ${{ steps.lint_pr_title.outputs.error_message }}
+            ```
+
+      # Delete a previous comment when the issue has been resolved
+      - if: ${{ steps.lint_pr_title.outputs.error_message == null }}
+        uses: marocchino/sticky-pull-request-comment@331f8f5b4215f0445d3c07b4967662a32a2d3e31 # v2
+        with:
+          header: pr-title-lint-error
+          delete: true
diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml
index 191c69180631..d4667a273ef4 100644
--- a/.github/workflows/ci-common-reusable.yml
+++ b/.github/workflows/ci-common-reusable.yml
@@ -4,7 +4,7 @@ on:
 
 jobs:
   build:
-    runs-on: [matterlabs-ci-runner]
+    runs-on: matterlabs-ci-runner-highmem-long
     env:
       RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml"
 
diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml
index 2fa6cde5fdeb..e46a67dd8af4 100644
--- a/.github/workflows/ci-core-lint-reusable.yml
+++ b/.github/workflows/ci-core-lint-reusable.yml
@@ -4,8 +4,7 @@ on:
 
 jobs:
   code_lint:
-    runs-on: [matterlabs-ci-runner]
-
+    runs-on: matterlabs-ci-runner-highmem-long
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
         with:
@@ -16,26 +15,31 @@ jobs:
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
+          echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV
+          echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV
 
       - name: Start services
         run: |
-          mkdir -p ./volumes/postgres
-          run_retried docker compose pull zk postgres
-          docker compose up -d zk postgres
+          ci_localnet_up
           ci_run sccache --start-server
 
-      - name: Setup db
+      - name: Build
         run: |
-          ci_run zk
-          ci_run run_retried rustup show
-          ci_run zk db migrate
+          ci_run ./bin/zkt
+          ci_run yarn install
+          ci_run git config --global --add safe.directory /usr/src/zksync
+          ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }}
 
       - name: Lints
         run: |
-          ci_run zk fmt --check
-          ci_run zk lint rust --check
-          ci_run zk lint toolbox --check
-          ci_run zk lint js --check
-          ci_run zk lint ts --check
-          ci_run zk lint md --check
-          ci_run zk db check-sqlx-data
+          ci_run zk_supervisor fmt --check
+          ci_run zk_supervisor lint -t md --check
+          ci_run zk_supervisor lint -t sol --check
+          ci_run zk_supervisor lint -t js --check
+          ci_run zk_supervisor lint -t ts --check
+          ci_run zk_supervisor lint -t rs --check
+
+      - name: Check Database
+        run: |
+          ci_run zk_supervisor database check-sqlx-data --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }}
diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml
index 85eefc862272..a88a8fe3944e 100644
--- a/.github/workflows/ci-core-reusable.yml
+++ b/.github/workflows/ci-core-reusable.yml
@@ -13,7 +13,7 @@ jobs:
     name: lint
     uses: ./.github/workflows/ci-core-lint-reusable.yml
 
   unit-tests:
-    runs-on: [matterlabs-ci-runner]
+    runs-on: [ matterlabs-ci-runner-highmem-long ]
 
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
@@ -52,29 +52,26 @@ jobs:
 
       - name: Init
         run: |
-          ci_run zk
           ci_run run_retried rustup show
-          ci_run zk run yarn
-          ci_run zk db setup
-          ci_run zk compiler all
-          ci_run zk contract build
+          ci_run ./bin/zkt
+          ci_run zk_supervisor contracts
 
       - name: Contracts unit tests
         run: ci_run yarn l1-contracts test
 
       - name: Rust unit tests
         run: |
-          ci_run zk test rust
+          ci_run zk_supervisor test rust
           # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible
           # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually.
-          ci_run zk f cargo test --release -p vm-benchmark --bench criterion --bench fill_bootloader
+          ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch
 
   loadtest:
-    runs-on: [matterlabs-ci-runner]
+    runs-on: [ matterlabs-ci-runner-high-performance ]
     strategy:
       fail-fast: false
       matrix:
-        vm_mode: ["old", "new"]
+        vm_mode: [ "OLD", "NEW" ]
 
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
@@ -90,11 +87,12 @@ jobs:
 
       - name: Loadtest configuration
         run: |
-          echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'new' && 22000 || 16000 }} >> .env
-          echo ACCOUNTS_AMOUNT="150" >> .env
+          echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env
+          echo ACCOUNTS_AMOUNT="100" >> .env
+          echo MAX_INFLIGHT_TXS="10" >> .env
+          echo SYNC_API_REQUESTS_LIMIT="15" >> .env
           echo FAIL_FAST=true >> .env
           echo IN_DOCKER=1 >> .env
-          echo DATABASE_MERKLE_TREE_MODE=lightweight >> .env
 
       - name: Start services
         run: |
@@ -108,22 +106,34 @@ jobs:
           ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts
           ci_run git config --global --add safe.directory /usr/src/zksync/contracts
 
-          ci_run zk
-          ci_run zk init --local-legacy-bridge-testing
+          ci_run ./bin/zkt
+          ci_run zk_inception chain create \
+            --chain-name legacy \
+            --chain-id sequential \
+            --prover-mode no-proofs \
+            --wallet-creation localhost \
+            --l1-batch-commit-data-generator-mode rollup \
+            --base-token-address 0x0000000000000000000000000000000000000001 \
+            --base-token-price-nominator 1 \
+            --base-token-price-denominator 1 \
+            --set-as-default false \
+            --ignore-prerequisites \
+            --legacy-bridge
+
+          ci_run zk_inception ecosystem init --dev --verbose
+          ci_run zk_supervisor contracts --test-contracts
 
       # `sleep 60` because we need to wait until server added all the tokens
       - name: Run server
         run: |
-          EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=${{ matrix.vm_mode }} \
-          PASSED_ENV_VARS="EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE" \
-          ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log &
+          ci_run sed -i -e 's/mode: FULL/mode: LIGHTWEIGHT/' chains/legacy/configs/general.yaml
+          ci_run sed -i -e 's/state_keeper_fast_vm_mode:.*/state_keeper_fast_vm_mode: ${{ matrix.vm_mode }}/' chains/legacy/configs/general.yaml
+          ci_run sed -i -e 's/delay_interval:.*/delay_interval: 50/' chains/legacy/configs/general.yaml
+          ci_run zk_inception server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log &
           ci_run sleep 60
 
-      - name: Deploy legacy era contracts
-        run: ci_run zk contract setup-legacy-bridge-era
-
       - name: Perform loadtest
-        run: ci_run zk run loadtest
+        run: ci_run zk_supervisor t loadtest -v --chain=legacy
 
       - name: Show server.log logs
         if: always()
@@ -142,13 +152,13 @@ jobs:
       # To be consistent with the rest of the workflow we disable it explicitly.
       fail-fast: false
       matrix:
-        consensus: [false, true]
-        base_token: ["Eth", "Custom"]
-        deployment_mode: ["Rollup", "Validium"]
+        consensus: [ false, true ]
+        base_token: [ "Eth", "Custom" ]
+        deployment_mode: [ "Rollup", "Validium" ]
     env:
       SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}"
 
-    runs-on: [matterlabs-ci-runner]
+    runs-on: [ matterlabs-ci-runner-highmem-long ]
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
         with:
@@ -243,7 +253,7 @@ jobs:
           DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \
           SNAPSHOTS_CREATOR_VERSION=${{ matrix.deployment_mode == 'Validium' && '0' || '1' }} \
           DISABLE_TREE_DURING_PRUNING=${{ matrix.base_token == 'Eth' }} \
-          ETH_CLIENT_WEB3_URL="http://reth:8545" \
+          ETH_CLIENT_WEB3_URL="http://localhost:8545" \
           PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE,DISABLE_TREE_DURING_PRUNING,SNAPSHOTS_CREATOR_VERSION,ETH_CLIENT_WEB3_URL" \
           ci_run yarn recovery-test snapshot-recovery-test
 
@@ -251,7 +261,7 @@ jobs:
         run: |
           ENABLE_CONSENSUS=${{ matrix.consensus }} \
           DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \
-          ETH_CLIENT_WEB3_URL="http://reth:8545" \
+          ETH_CLIENT_WEB3_URL="http://localhost:8545" \
           PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE,ETH_CLIENT_WEB3_URL" \
           ci_run yarn recovery-test genesis-recovery-test
 
@@ -293,7 +303,7 @@ jobs:
 
       - name: Show revert.log logs
         if: always()
-        run: ci_run cat core/tests/revert-test/revert.log || true
+        run: ci_run cat logs/revert/default/server.log || true
 
       - name: Show upgrade.log logs
         if: always()
@@ -314,10 +324,10 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        consensus: [false, true]
-        base_token: ["Eth", "Custom"]
-        deployment_mode: ["Rollup", "Validium"]
-    runs-on: [matterlabs-ci-runner]
+        consensus: [ false, true ]
+        base_token: [ "Eth", "Custom" ]
+        deployment_mode: [ "Rollup", "Validium" ]
+    runs-on: [ matterlabs-ci-runner-highmem-long ]
     env:
       SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}"
 
@@ -382,7 +392,7 @@ jobs:
 
       - name: Run revert test
         run: |
-          ENABLE_CONSENSUS=${{ matrix.consensus }} DEPLOYMENT_MODE=${{ matrix.deployment_mode }} PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" ci_run zk test i revert-en
+          ENABLE_CONSENSUS=${{ matrix.consensus }} \
+          DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \
+          PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" \
+          ci_run zk test i revert-en
+
           # test terminates the nodes, so we restart them.
if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then ZKSYNC_ENV=docker ci_run zk server --components=$SERVER_COMPONENTS &>>server.log & @@ -414,13 +428,13 @@ jobs: if: always() run: ci_run cat ext-node.log || true - - name: Show revert_main.log logs + - name: Show revert logs (main node) if: always() - run: ci_run cat core/tests/revert-test/revert_main.log || true + run: ci_run cat logs/revert/en/default/server.log || true - - name: Show revert_ext.log logs + - name: Show revert logs (EN) if: always() - run: ci_run cat core/tests/revert-test/revert_ext.log || true + run: ci_run cat logs/revert/en/default/external_node.log || true - name: Show upgrade.log logs if: always() @@ -431,3 +445,4 @@ jobs: run: | ci_run sccache --show-stats || true ci_run cat /tmp/sccache_log.txt || true + diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml index 82ef312c9832..2b8eea15a827 100644 --- a/.github/workflows/ci-docs-reusable.yml +++ b/.github/workflows/ci-docs-reusable.yml @@ -4,7 +4,8 @@ on: jobs: lint: - runs-on: [matterlabs-ci-runner] + # TODO: After migraton switch to CI + runs-on: matterlabs-default-infra-runners steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 @@ -19,13 +20,15 @@ jobs: - name: Start services run: | - mkdir -p ./volumes/postgres - run_retried docker compose pull zk postgres - docker compose up -d zk postgres + run_retried docker compose pull zk + docker compose up -d zk + + - name: Build + run: | + ci_run ./bin/zkt + ci_run yarn install + ci_run git config --global --add safe.directory /usr/src/zksync - name: Lints run: | - ci_run zk - ci_run zk fmt md --check - ci_run zk lint md --check - + ci_run zk_supervisor lint -t md --check diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index b61a61b709d8..d1d4a9ab96b2 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -3,7 +3,7 @@ on: workflow_call: jobs: lint: - runs-on: [matterlabs-ci-runner] + runs-on: [matterlabs-ci-runner-highmem-long] env: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" @@ -34,7 +34,7 @@ jobs: run: ci_run bash -c "cd prover && cargo fmt --check" unit-tests: - runs-on: [matterlabs-ci-runner] + runs-on: [matterlabs-ci-runner-highmem-long] env: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index b2fc10c28aae..638f168de309 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -4,21 +4,26 @@ on: env: CLICOLOR: 1 + # We run multiple binaries in parallel, and by default they will try to utilize all the + # available CPUs. In tests, there is not much CPU-intensive work (rayon), but a lot of + # async work (tokio), so we prioritize tokio. 
+  TOKIO_WORKER_THREADS: 4
+  RAYON_NUM_THREADS: 2
 
 jobs:
   lint:
     name: lint
     uses: ./.github/workflows/ci-core-lint-reusable.yml
 
-  build:
-    runs-on: [matterlabs-ci-runner]
-
+  tests:
+    runs-on: [ matterlabs-ci-runner-ultra-performance ]
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
         with:
           submodules: "recursive"
           fetch-depth: 0
+
       - name: Setup environment
         run: |
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
@@ -30,145 +35,280 @@ jobs:
           ci_localnet_up
           ci_run sccache --start-server
 
-      - name: Build
-        run: |
-          ci_run bash -c "cd zk_toolbox && cargo build --release"
-
-      # Compress with tar to avoid permission loss
-      # https://github.com/actions/upload-artifact?tab=readme-ov-file#permission-loss
-      - name: Tar zk_toolbox binaries
-        run: |
-          tar -C ./zk_toolbox/target/release -cvf zk_toolbox.tar zk_inception zk_supervisor
-
-      - name: Upload zk_toolbox binaries
-        uses: actions/upload-artifact@v4
-        with:
-          name: zk_toolbox
-          path: zk_toolbox.tar
-          compression-level: 0
-
-  tests:
-    runs-on: [matterlabs-ci-runner]
-    needs: [build]
-
-    steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
-        with:
-          submodules: "recursive"
-          fetch-depth: 0
-
-      - name: Download zk_toolbox binaries
-        uses: actions/download-artifact@v4
-        with:
-          name: zk_toolbox
-          path: .
-
-      - name: Extract zk_toolbox binaries
-        run: |
-          tar -xvf zk_toolbox.tar -C ./bin
-
-      - name: Setup environment
-        run: |
-          echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
-          echo $(pwd)/bin >> $GITHUB_PATH
-          echo IN_DOCKER=1 >> .env
+      - name: Build zk_toolbox
+        run: ci_run bash -c "./bin/zkt"
 
-      - name: Start services
+      - name: Create log directories
         run: |
-          ci_localnet_up
-          ci_run sccache --start-server
+          SERVER_LOGS_DIR=logs/server
+          INTEGRATION_TESTS_LOGS_DIR=logs/integration_tests
+          INTEGRATION_TESTS_EN_LOGS_DIR=logs/integration_tests/en
+          SNAPSHOT_RECOVERY_LOGS_DIR=logs/integration_tests/en
+          GENESIS_RECOVERY_LOGS_DIR=logs/integration_tests/en
+          EXTERNAL_NODE_LOGS_DIR=logs/external_node
+          REVERT_LOGS_DIR=logs/revert
+
+          mkdir -p $SERVER_LOGS_DIR
+          mkdir -p $INTEGRATION_TESTS_LOGS_DIR
+          mkdir -p $INTEGRATION_TESTS_EN_LOGS_DIR
+          mkdir -p $SNAPSHOT_RECOVERY_LOGS_DIR
+          mkdir -p $GENESIS_RECOVERY_LOGS_DIR
+          mkdir -p $EXTERNAL_NODE_LOGS_DIR
+          mkdir -p $REVERT_LOGS_DIR
+
+          echo "SERVER_LOGS_DIR=$SERVER_LOGS_DIR" >> $GITHUB_ENV
+          echo "INTEGRATION_TESTS_LOGS_DIR=$INTEGRATION_TESTS_LOGS_DIR" >> $GITHUB_ENV
+          echo "INTEGRATION_TESTS_EN_LOGS_DIR=$INTEGRATION_TESTS_EN_LOGS_DIR" >> $GITHUB_ENV
+          echo "SNAPSHOT_RECOVERY_LOGS_DIR=$SNAPSHOT_RECOVERY_LOGS_DIR" >> $GITHUB_ENV
+          echo "GENESIS_RECOVERY_LOGS_DIR=$GENESIS_RECOVERY_LOGS_DIR" >> $GITHUB_ENV
+          echo "EXTERNAL_NODE_LOGS_DIR=$EXTERNAL_NODE_LOGS_DIR" >> $GITHUB_ENV
+          echo "REVERT_LOGS_DIR=$REVERT_LOGS_DIR" >> $GITHUB_ENV
 
       - name: Initialize ecosystem
         run: |
           ci_run git config --global --add safe.directory /usr/src/zksync
           ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts
           ci_run git config --global --add safe.directory /usr/src/zksync/contracts
-
+
           ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \
-          --deploy-ecosystem --l1-rpc-url=http://reth:8545 \
-          --server-db-url=postgres://postgres:notsecurepassword@postgres:5432 \
+          --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \
+          --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
           --server-db-name=zksync_server_localhost_era \
-          --prover-db-url=postgres://postgres:notsecurepassword@postgres:5432 \
+          --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
           --prover-db-name=zksync_prover_localhost_era \
           --ignore-prerequisites --verbose \
           --observability=false
 
-      - name: Create and initialize chain
+      - name: Read Custom Token address and set as environment variable
+        run: |
+          address=$(awk -F": " '/tokens:/ {found_tokens=1} found_tokens && /DAI:/ {found_dai=1} found_dai && /address:/ {print $2; exit}' ./configs/erc20.yaml)
+          echo "address=$address"
+          echo "address=$address" >> $GITHUB_ENV
+
+      - name: Create and initialize Validium chain
         run: |
           ci_run zk_inception chain create \
-          --chain-name chain_rollup \
+          --chain-name validium \
           --chain-id sequential \
           --prover-mode no-proofs \
           --wallet-creation localhost \
-          --l1-batch-commit-data-generator-mode rollup \
+          --l1-batch-commit-data-generator-mode validium \
           --base-token-address 0x0000000000000000000000000000000000000001 \
           --base-token-price-nominator 1 \
           --base-token-price-denominator 1 \
-          --set-as-default true \
+          --set-as-default false \
+          --ignore-prerequisites
+
+          ci_run zk_inception chain init \
+          --deploy-paymaster \
+          --l1-rpc-url=http://localhost:8545 \
+          --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --server-db-name=zksync_server_localhost_validium \
+          --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --prover-db-name=zksync_prover_localhost_validium \
+          --port-offset 2000 \
+          --chain validium
+
+      - name: Create and initialize chain with Custom Token
+        run: |
+          ci_run zk_inception chain create \
+          --chain-name custom_token \
+          --chain-id sequential \
+          --prover-mode no-proofs \
+          --wallet-creation localhost \
+          --l1-batch-commit-data-generator-mode rollup \
+          --base-token-address ${{ env.address }} \
+          --base-token-price-nominator 3 \
+          --base-token-price-denominator 2 \
+          --set-as-default false \
           --ignore-prerequisites
 
           ci_run zk_inception chain init \
           --deploy-paymaster \
-          --l1-rpc-url=http://reth:8545 \
-          --server-db-url=postgres://postgres:notsecurepassword@postgres:5432 \
-          --server-db-name=zksync_server_localhost_rollup \
-          --prover-db-url=postgres://postgres:notsecurepassword@postgres:5432 \
-          --prover-db-name=zksync_prover_localhost_rollup
+          --l1-rpc-url=http://localhost:8545 \
+          --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --server-db-name=zksync_server_localhost_custom_token \
+          --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --prover-db-name=zksync_prover_localhost_custom_token \
+          --port-offset 3000 \
+          --chain custom_token
+
+      - name: Create and initialize Consensus chain
+        run: |
+          ci_run zk_inception chain create \
+          --chain-name consensus \
+          --chain-id sequential \
+          --prover-mode no-proofs \
+          --wallet-creation localhost \
+          --l1-batch-commit-data-generator-mode validium \
+          --base-token-address ${{ env.address }} \
+          --base-token-price-nominator 3 \
+          --base-token-price-denominator 2 \
+          --set-as-default false \
+          --ignore-prerequisites
+
+          ci_run zk_inception chain init \
+          --deploy-paymaster \
+          --l1-rpc-url=http://localhost:8545 \
+          --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --server-db-name=zksync_server_localhost_consensus \
+          --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --prover-db-name=zksync_prover_localhost_consensus \
+          --port-offset 4000 \
+          --chain consensus
+
+      - name: Build test dependencies
+        run: |
+          ci_run zk_supervisor test build
 
-      - name: Run server
+      - name: Run servers
         run: |
-          ci_run zk_inception server --ignore-prerequisites &>server.log &
+          ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log &
+          ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log &
+          ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log &
+          ci_run zk_inception server --ignore-prerequisites --chain consensus \
+          --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \
+          &> ${{ env.SERVER_LOGS_DIR }}/consensus.log &
+
           ci_run sleep 5
 
       - name: Run integration tests
         run: |
-          ci_run zk_supervisor test integration --ignore-prerequisites --verbose
+          ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log &
+          PID1=$!
+
+          ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log &
+          PID2=$!
+
+          ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log &
+          PID3=$!
+
+          ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log &
+          PID4=$!
+
+          wait $PID1
+          wait $PID2
+          wait $PID3
+          wait $PID4
 
-      - name: Init external node server
+      - name: Init external nodes
         run: |
-          ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@postgres:5432 \
-          --db-name=zksync_en_localhost_era --l1-rpc-url=http://reth:8545
-          ci_run zk_inception external-node init --ignore-prerequisites
+          ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era
+          ci_run zk_inception external-node init --ignore-prerequisites --chain era
+
+          ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium
+          ci_run zk_inception external-node init --ignore-prerequisites --chain validium
+
+          ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token
+          ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token
+
+          ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+          --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus
+          ci_run zk_inception external-node init --ignore-prerequisites --chain consensus
 
       - name: Run recovery tests (from snapshot)
         run: |
-          ci_run zk_supervisor test recovery --snapshot --ignore-prerequisites --verbose
-
+
+          ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log &
+          PID1=$!
+
+          ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/validium.log &
+          PID2=$!
+
+          ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/custom_token.log &
+          PID3=$!
+
+          ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/consensus.log &
+          PID4=$!
+
+          wait $PID1
+          wait $PID2
+          wait $PID3
+          wait $PID4
+
       - name: Run recovery tests (from genesis)
         run: |
-          ci_run zk_supervisor test recovery --ignore-prerequisites --verbose
-
+          ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain era &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/rollup.log &
+          PID1=$!
+
+          ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain validium &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/validium.log &
+          PID2=$!
+
+          ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log &
+          PID3=$!
+
+          ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log &
+          PID4=$!
+
+          wait $PID1
+          wait $PID2
+          wait $PID3
+          wait $PID4
+
       - name: Run external node server
         run: |
-          ci_run zk_inception external-node run --ignore-prerequisites &>external_node.log &
+          ci_run zk_inception external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log &
+          ci_run zk_inception external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log &
+          ci_run zk_inception external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log &
+          ci_run zk_inception external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log &
           ci_run sleep 5
 
       - name: Run integration tests en
         run: |
-          ci_run zk_supervisor test integration --ignore-prerequisites --verbose --external-node
+          ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain era &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/rollup.log &
+          PID1=$!
+
+          ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain validium &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/validium.log &
+          PID2=$!
+
+          ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain custom_token &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/custom_token.log &
+          PID3=$!
+
+          ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain consensus &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/consensus.log &
+          PID4=$!
+
+          wait $PID1
+          wait $PID2
+          wait $PID3
+          wait $PID4
 
       - name: Run revert tests
         run: |
-          ci_run zk_supervisor test revert --ignore-prerequisites --verbose
+          ci_run killall -INT zksync_server || true
+          ci_run killall -INT zksync_external_node || true
+
+          ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log &
+          PID1=$!
 
-      - name: Run revert tests (external node)
-        run: |
-          ci_run zk_supervisor test revert --external-node --ignore-prerequisites --verbose
+          ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain validium &> ${{ env.REVERT_LOGS_DIR }}/validium.log &
+          PID2=$!
+
+          ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain custom_token &> ${{ env.REVERT_LOGS_DIR }}/custom_token.log &
+          PID3=$!
 
-      # This test should be the last one as soon as it
-      # finished bootloader will be different
+          ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log &
+          PID4=$!
+
+          wait $PID1
+          wait $PID2
+          wait $PID3
+          wait $PID4
+
+
+      # Upgrade tests should run last, because as soon as they
+      # finish, the bootloader will be different
+      # TODO make upgrade tests safe to run multiple times
       - name: Run upgrade test
         run: |
-          ci_run zk_supervisor test upgrade
-
-      - name: Show server.log logs
-        if: always()
-        run: ci_run cat server.log || true
+          ci_run zk_supervisor test upgrade --no-deps --chain era
 
-      - name: Show external_node.log logs
-        if: always()
-        run: ci_run cat external_node.log || true
-
-      - name: Show revert.log logs
+      - name: Upload logs
+        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
         if: always()
-        run: ci_run cat ./core/tests/revert-test/revert.log || true
+        with:
+          name: logs
+          path: logs
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f8264d4466c1..e05b84cda971 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -31,7 +31,7 @@ jobs:
 
       - name: Get changed files
         id: changed-files
-        uses: tj-actions/changed-files@v39
+        uses: tj-actions/changed-files@fea790cb660e33aef4bdf07304e28fedd77dfa13 # v39
         with:
           files_yaml: |
             prover:
@@ -62,6 +62,7 @@ jobs:
             - '!**/*.MD'
             - 'docker-compose.yml'
           zk_toolbox:
+            - '.github/workflows/ci-zk-toolbox-reusable.yml'
             - 'zk_toolbox/**'
             - '!**/*.md'
             - '!**/*.MD'
@@ -75,6 +76,7 @@ jobs:
             - 'etc/**'
             - 'contracts/**'
             - 'infrastructure/zk/**'
+            - 'docker/zk-environment/**'
             - '!**/*.md'
             - '!**/*.MD'
 
diff --git a/.github/workflows/nodejs-license.yaml b/.github/workflows/nodejs-license.yaml
index b776673e1298..642ded744021 100644
--- a/.github/workflows/nodejs-license.yaml
+++ b/.github/workflows/nodejs-license.yaml
@@ -47,7 +47,7 @@ jobs:
         uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
 
       - name: Use Node.js
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
         with:
           node-version: 18
 
diff --git a/.github/workflows/protobuf.yaml b/.github/workflows/protobuf.yaml
index d2885f613aa0..9c2c34186701 100644
--- a/.github/workflows/protobuf.yaml
+++ b/.github/workflows/protobuf.yaml
@@ -33,7 +33,7 @@ jobs:
   compatibility:
     runs-on: [ubuntu-22.04-github-hosted-16core]
     steps:
-      - uses: mozilla-actions/sccache-action@v0.0.3
+      - uses: mozilla-actions/sccache-action@89e9040de88b577a072e3760aaf59f585da083af # v0.0.5
 
       # before
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
@@ -69,7 +69,7 @@ jobs:
         | xargs cat > ./after.binpb
 
       # compare
-      - uses: bufbuild/buf-setup-action@v1
+      - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
         with:
           github_token: ${{ github.token }}
       - name: buf breaking
diff --git a/.github/workflows/release-please-cargo-lock.yml b/.github/workflows/release-please-cargo-lock.yml
index bdb5906716ca..8c8036dfa47a 100644
--- a/.github/workflows/release-please-cargo-lock.yml
+++ b/.github/workflows/release-please-cargo-lock.yml
@@ -6,6 +6,7 @@ on:
 name: release-please-update-cargo-lock
 jobs:
   update_cargo_lock:
+    # TODO: After migration, switch to CI
     runs-on: [matterlabs-default-infra-runners]
 
     steps:
diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml
index 692a420eed81..4a8f527f45c6 100644
--- a/.github/workflows/release-please.yml
+++ b/.github/workflows/release-please.yml
@@ -15,7 +15,7 @@ jobs:
     steps:
       - name: Run release-please
         id: release
-        uses: google-github-actions/release-please-action@v4
+        uses: google-github-actions/release-please-action@e4dc86ba9405554aeba3c6bb2d169500e7d3b4ee # v4.1.1
         with:
           token: ${{ secrets.RELEASE_TOKEN }}
           config-file: .github/release-please/config.json
diff --git a/.github/workflows/release-stable-en.yml b/.github/workflows/release-stable-en.yml
index b68f36c3e6fd..222d033069d6 100644
--- a/.github/workflows/release-stable-en.yml
+++ b/.github/workflows/release-stable-en.yml
@@ -10,7 +10,8 @@ on:
 
 jobs:
   release:
-    runs-on: [matterlabs-ci-runner]
+    # TODO: After migration, switch to CI
+    runs-on: [matterlabs-default-infra-runners]
     steps:
       - name: Login to Docker registries
         run: |
@@ -42,7 +43,7 @@ jobs:
             docker pull $alpha_tag
            docker tag $alpha_tag $tag
            docker push $tag
-
+
            platform_tags+=" --amend $tag"
          done
          for manifest in "${repo}:${tag_name}" "${repo}:2.0-${tag_name}"; do
diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml
index 9f921be78292..ce74b76a6b7c 100644
--- a/.github/workflows/release-test-stage.yml
+++ b/.github/workflows/release-test-stage.yml
@@ -23,7 +23,7 @@ jobs:
 
       - name: Get all test, doc and src files that have changed
         id: changed-files-yaml
-        uses: tj-actions/changed-files@v37
+        uses: tj-actions/changed-files@fea790cb660e33aef4bdf07304e28fedd77dfa13 # v39
         with:
           files_yaml: |
             # TODO: make it more granular, as already implemented in CI workflow
@@ -39,7 +39,7 @@ jobs:
             - '!prover/**'
   setup:
     name: Setup
-    runs-on: [matterlabs-deployer-stage]
+    runs-on: [ matterlabs-deployer-stage ]
     outputs:
       image_tag_suffix: ${{ steps.generate-tag-suffix.outputs.image_tag_suffix }}
       prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }}
@@ -61,7 +61,7 @@ jobs:
 
   build-push-core-images:
     name: Build and push images
-    needs: [setup, changed_files]
+    needs: [ setup, changed_files ]
     uses: ./.github/workflows/build-core-template.yml
     if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true'
     with:
@@ -72,7 +72,7 @@ jobs:
 
   build-push-tee-prover-images:
     name: Build and push images
-    needs: [setup, changed_files]
+    needs: [ setup, changed_files ]
     uses: ./.github/workflows/build-tee-prover-template.yml
     if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true'
     with:
@@ -84,7 +84,7 @@ jobs:
 
   build-push-contract-verifier:
     name: Build and push images
-    needs: [setup, changed_files]
+    needs: [ setup, changed_files ]
    uses: ./.github/workflows/build-contract-verifier-template.yml
    if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true'
    with:
@@ -95,26 +95,26 @@ jobs:
 
   build-push-prover-images:
     name: Build and push images
-    needs: [setup, changed_files]
+    needs: [ setup, changed_files ]
     uses: ./.github/workflows/build-prover-template.yml
     if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true'
     with:
       image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}
       ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }}
-      CUDA_ARCH: "60;70;75;89"
+      CUDA_ARCH: "60;70;75;80;89"
     secrets:
       DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
       DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
 
   build-push-witness-generator-image-avx512:
     name: Build and push prover images with avx512 instructions
-    needs: [setup, changed_files]
+    needs: [ setup, changed_files ]
     uses: ./.github/workflows/build-witness-generator-template.yml
     if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true'
     with:
       image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512
       ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }}
-      CUDA_ARCH: "60;70;75;89"
+      CUDA_ARCH: "60;70;75;80;89"
       WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl"
     secrets:
       DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
@@ -122,7 +122,7 @@ jobs:
 
   build-gar-prover-fri-gpu:
     name: Build GAR prover FRI GPU
-    needs: [setup, build-push-prover-images]
+    needs: [ setup, build-push-prover-images ]
     uses: ./.github/workflows/build-prover-fri-gpu-gar.yml
     if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true'
     with:
diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml
index 53dada123574..cfcfff93037f 100644
--- a/.github/workflows/vm-perf-comparison.yml
+++ b/.github/workflows/vm-perf-comparison.yml
@@ -1,4 +1,4 @@
-name: Compare VM perfomance to base branch
+name: Compare VM performance to base branch
 
 on:
   pull_request:
@@ -8,7 +8,7 @@ on:
 jobs:
   vm-benchmarks:
     name: Run VM benchmarks
-    runs-on: [matterlabs-ci-runner]
+    runs-on: [matterlabs-ci-runner-highmem-long]
 
     steps:
       - name: checkout base branch
@@ -47,7 +47,7 @@ jobs:
           ci_run zk
           ci_run zk compiler system-contracts
           ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai
-          ci_run cargo run --package vm-benchmark --release --bin instruction-counts | tee base-opcodes || touch base-opcodes
+          ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes
           ci_run yarn workspace system-contracts clean
 
       - name: checkout PR
@@ -59,7 +59,7 @@ jobs:
           ci_run zk
           ci_run zk compiler system-contracts
           ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai
-          ci_run cargo run --package vm-benchmark --release --bin instruction-counts | tee pr-opcodes || touch pr-opcodes
+          ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes
 
           EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
           echo "speedup<<$EOF" >> $GITHUB_OUTPUT
@@ -68,7 +68,7 @@ jobs:
         id: comparison
 
       - name: Comment on PR
-        uses: thollander/actions-comment-pull-request@v2
+        uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0
         with:
           message: |
             ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }}
diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml
index fce7ead2d696..4d90b2a24ebb 100644
--- a/.github/workflows/vm-perf-to-prometheus.yml
+++ b/.github/workflows/vm-perf-to-prometheus.yml
@@ -12,7 +12,7 @@ concurrency: vm-benchmarks
 jobs:
   vm-benchmarks:
     name: Run VM benchmarks
-    runs-on: [matterlabs-ci-runner]
+    runs-on: [matterlabs-ci-runner-highmem-long]
 
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
         with:
@@ -21,7 +21,7 @@ jobs:
 
       - name: setup-env
         run: |
-          echo PUSH_VM_BENCHMARKS_TO_PROMETHEUS=1 >> .env
+          echo BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL=${{ secrets.BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL }} >> .env
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
 
@@ -31,10 +31,12 @@ jobs:
           run_retried docker compose pull zk
           docker compose up -d zk
           ci_run zk
-          ci_run zk compiler system-contracts
+          ci_run zk compiler all
 
       - name: run benchmarks
         run: |
-          ci_run cargo bench --package vm-benchmark --bench diy_benchmark
+          ci_run cargo bench --package vm-benchmark --bench oneshot
+          # Run only benches with 1,000 transactions per batch to not spend too much time
+          ci_run cargo bench --package vm-benchmark --bench batch '/1000$'
           ci_run cargo bench --package vm-benchmark --bench iai | tee iai-result
           ci_run cargo run --package vm-benchmark --bin iai_results_to_prometheus --release < iai-result
diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml
index 5036533abf72..5a08dff178c4 100644
--- a/.github/workflows/zk-environment-publish.yml
+++ b/.github/workflows/zk-environment-publish.yml
@@ -36,7 +36,7 @@ jobs:
 
       - name: Get changed files
         id: changed-files-yaml
-        uses: tj-actions/changed-files@v39
+        uses: tj-actions/changed-files@fea790cb660e33aef4bdf07304e28fedd77dfa13 # v39
         with:
           files_yaml: |
             zk_env:
@@ -76,7 +76,7 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - runner: matterlabs-ci-runner
+          - runner: matterlabs-ci-runner-high-performance
             arch: amd64
           - runner: matterlabs-ci-runner-arm
             arch: arm64
@@ -129,7 +129,8 @@ jobs:
       packages: write
       contents: read
     needs: [changed_files, get_short_sha, zk_environment]
-    runs-on: matterlabs-ci-runner
+    # TODO: After migration, switch to CI
+    runs-on: [matterlabs-default-infra-runners]
     steps:
       - name: Login to DockerHub
         uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
@@ -188,7 +189,7 @@ jobs:
       packages: write
       contents: read
     needs: changed_files
-    runs-on: [matterlabs-ci-runner]
+    runs-on: [matterlabs-ci-runner-high-performance]
     strategy:
       matrix:
         cuda_version: ['11_8', '12_0']
diff --git a/.gitignore b/.gitignore
index 66d7d00b263a..725b5940afeb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -110,6 +110,7 @@ hyperchain-*.yml
 
 # Prover keys that should not be commited
 prover/crates/bin/vk_setup_data_generator_server_fri/data/setup_*
+prover/data/keys/setup_*
 
 # Zk Toolbox
 chains/era/configs/*
diff --git a/.prettierignore b/.prettierignore
index d58a7f3e8e6e..51cd5e684096 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -34,3 +34,4 @@ contracts/l1-contracts/lib
 
 **/.git
 **/node_modules
+configs/portal.config.js
\ No newline at end of file
diff --git a/CODEOWNERS b/CODEOWNERS
index 63094b333057..813cd396d2c2 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,4 +1,4 @@
-.github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc @EmilLuta
-**/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc @EmilLuta
+.github/release-please/** @matter-labs/core-release-managers
+**/CHANGELOG.md @matter-labs/core-release-managers
 CODEOWNERS @RomanBrodetski @perekopskiy @Deniallugo @popzxc
 .github/workflows/** @matter-labs/devops
diff --git a/Cargo.lock b/Cargo.lock
index 0d4ba4c23834..59b464f8501d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -44,7 +44,7 @@ version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
  "cipher",
  "cpufeatures",
 ]
@@ -80,7 +80,7 @@ version = "0.8.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
  "once_cell",
  "version_check",
  "zerocopy",
 ]
"1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand", + "futures-lite", + "slab", +] + +[[package]] +name = "async-fs" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" +dependencies = [ + "async-lock", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-io" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +dependencies = [ + "async-lock", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix", + "slab", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-net" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" +dependencies = [ + "async-io", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-process" +version = "2.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a07789659a4d385b79b18b9127fc27e1a59e1e89117c78c5ea3b806f016374" +dependencies = [ + "async-channel", + "async-io", + "async-lock", + "async-signal", + "async-task", + "blocking", + "cfg-if", + "event-listener 5.3.1", + "futures-lite", + "rustix", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "async-signal" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" +dependencies = [ + "async-io", + "async-lock", + "atomic-waker", + "cfg-if", + "futures-core", + "futures-io", + "rustix", + "signal-hook-registry", + "slab", + "windows-sys 0.59.0", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -261,6 +376,12 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "async-trait" version = "0.1.74" @@ -281,6 +402,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic-take" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8ab6b55fe97976e46f91ddbed8d147d966475dc29b2032757ba47e02376fbc3" + [[package]] name = "atomic-waker" version = "1.1.2" @@ -350,6 +477,7 @@ dependencies = [ "matchit", "memchr", "mime", + "multer", 
"percent-encoding", "pin-project-lite", "rustversion", @@ -406,7 +534,7 @@ checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", @@ -425,6 +553,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "base58" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" + [[package]] name = "base64" version = "0.13.1" @@ -467,30 +601,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bellman_ce" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea340d5c1394ee4daf4415dd80e06f74e0ad9b08e21f73f6bb1fa3a9dfae80d" -dependencies = [ - "arrayvec 0.7.4", - "bit-vec", - "blake2s_const", - "blake2s_simd", - "byteorder", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "futures 0.3.28", - "hex", - "lazy_static", - "num_cpus", - "pairing_ce", - "rand 0.4.6", - "serde", - "smallvec", - "tiny-keccak 1.5.0", -] - [[package]] name = "bigdecimal" version = "0.4.5" @@ -557,6 +667,17 @@ dependencies = [ "which", ] +[[package]] +name = "bip39" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +dependencies = [ + "bitcoin_hashes", + "serde", + "unicode-normalization", +] + [[package]] name = "bit-vec" version = "0.6.3" @@ -566,6 +687,12 @@ dependencies = [ "serde", ] +[[package]] +name = "bitcoin_hashes" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" + [[package]] name = "bitflags" version = "1.3.2" @@ -622,6 +749,16 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + "arrayvec 0.4.12", + "constant_time_eq 0.1.5", +] + [[package]] name = "blake2-rfc_bellman_edition" version = "0.0.1" @@ -630,7 +767,7 @@ checksum = "fdc60350286c7c3db13b98e91dbe5c8b6830a6821bc20af5b0c310ce94d74915" dependencies = [ "arrayvec 0.4.12", "byteorder", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] @@ -643,14 +780,14 @@ dependencies = [ ] [[package]] -name = "blake2s_const" -version = "0.7.0" +name = "blake2b_simd" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39d933cb38939f885001867874c65699c36f30f0c78aae9f4c9f01b3e4b306a" +checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" dependencies = [ "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", + "arrayvec 0.7.4", + "constant_time_eq 0.3.1", ] [[package]] @@ -661,7 +798,7 @@ checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" dependencies = [ "arrayref", "arrayvec 0.5.2", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] @@ -708,6 +845,19 @@ dependencies = [ "zksync_vlog", ] +[[package]] +name = "blocking" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +dependencies = [ + "async-channel", + "async-task", + 
"futures-io", + "futures-lite", + "piper", +] + [[package]] name = "blst" version = "0.3.13" @@ -722,18 +872,17 @@ dependencies = [ [[package]] name = "boojum" -version = "0.2.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df88daa33db46d683967ca09a4f04817c38950483f2501a771d497669a8a4bb1" +checksum = "68ec2f007ff8f90cc459f03e9f30ca1065440170f013c868823646e2e48d0234" dependencies = [ "arrayvec 0.7.4", "bincode", "blake2 0.10.6", "const_format", - "convert_case", - "crossbeam 0.8.4", + "convert_case 0.6.0", + "crossbeam", "crypto-bigint 0.5.3", - "cs_derive", "derivative", "ethereum-types", "firestorm", @@ -741,7 +890,6 @@ dependencies = [ "lazy_static", "num-modular", "num_cpus", - "pairing_ce", "rand 0.8.5", "rayon", "serde", @@ -749,6 +897,8 @@ dependencies = [ "sha3_ce", "smallvec", "unroll", + "zksync_cs_derive", + "zksync_pairing", ] [[package]] @@ -775,11 +925,20 @@ dependencies = [ "syn_derive", ] +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "tinyvec", +] + [[package]] name = "build_html" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3108fe6fe7ac796fb7625bdde8fa2b67b5a7731496251ca57c7b8cadd78a16a1" +checksum = "225eb82ce9e70dcc0cfa6e404d0f353326b6e163bf500ec4711cec317d11935c" [[package]] name = "bumpalo" @@ -913,12 +1072,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -937,7 +1090,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -1046,82 +1199,82 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2593c02ad6b4b31ba63506c3f807f666133dd36bf47422f99b1d2947cf3c8dc1" +checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" dependencies = [ "derivative", "serde", - "zk_evm 0.150.4", - "zkevm_circuits 0.150.4", + "zk_evm 0.150.5", + "zkevm_circuits 0.150.5", ] [[package]] name = "circuit_sequencer_api" -version = "0.133.0" +version = "0.133.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a87dc7bee6630d4954ac7982eb77e2007476662250cf18e5c460bbc5ee435f1" +checksum = "eb959b1f8c6bbd8be711994d182e85452a26a5d2213a709290b71c8262af1331" dependencies = [ - "bellman_ce", "derivative", "rayon", "serde", "zk_evm 0.133.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.140.0" +version = "0.140.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b5138e6524c73e6d49fc1d0822b26e62a8d78b2c07e4e1c56061a447c10bec0" +checksum = "fa5f22311ce609d852d7d9f4943535ea4610aeb785129ae6ff83d5201c4fb387" dependencies = [ - "bellman_ce", "circuit_encodings 0.140.1", "derivative", "rayon", "serde", "zk_evm 0.140.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.141.1" +version = "0.141.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "55a257b31a8ea1c1723cab4fb5661c6b4c0ebe022d4b73bea9eb7c9150bd3bc1" +checksum = "4c47c71d6ba83a8beb0af13af70beffd627f5497caf3d44c6f96363e788b07ea" dependencies = [ - "bellman_ce", "circuit_encodings 0.141.1", "derivative", "rayon", "serde", "zk_evm 0.141.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.142.0" +version = "0.142.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d861a7a9b8df9389c63092985fc993c46954771da86462d7cab8cbf55a6497" +checksum = "e264723359e6a1aad98110bdccf1ae3ad596e93e7d31da9e40f6adc07e4add54" dependencies = [ - "bellman_ce", "circuit_encodings 0.142.1", "derivative", "rayon", "serde", "zk_evm 0.141.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d1a86b9c2207f3bb2dff5f00d1af1cb95004b6d07e9bacb6519fe08f12c04b" +checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" dependencies = [ - "bellman_ce", - "circuit_encodings 0.150.4", + "circuit_encodings 0.150.5", "derivative", "rayon", "serde", + "zksync_bellman", ] [[package]] @@ -1257,7 +1410,7 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ - "crossbeam-utils 0.8.20", + "crossbeam-utils", ] [[package]] @@ -1304,6 +1457,18 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "convert_case" version = "0.6.0" @@ -1359,7 +1524,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1398,41 +1563,17 @@ dependencies = [ "itertools 0.10.5", ] -[[package]] -name = "crossbeam" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-channel 0.4.4", - "crossbeam-deque 0.7.4", - "crossbeam-epoch 0.8.2", - "crossbeam-queue 0.2.3", - "crossbeam-utils 0.7.2", -] - [[package]] name = "crossbeam" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" dependencies = [ - "crossbeam-channel 0.5.13", - "crossbeam-deque 0.8.5", - "crossbeam-epoch 0.9.18", - "crossbeam-queue 0.3.11", - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", ] [[package]] @@ -1441,18 
+1582,7 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-utils", ] [[package]] @@ -1461,23 +1591,8 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "crossbeam-epoch 0.9.18", - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] @@ -1486,18 +1601,7 @@ version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-utils", ] [[package]] @@ -1506,18 +1610,7 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", + "crossbeam-utils", ] [[package]] @@ -1624,7 +1717,7 @@ version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", @@ -1651,8 +1744,28 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core 0.14.4", + "darling_macro 0.14.4", +] + +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] @@ -1669,24 +1782,74 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = 
"0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.86", + "quote 1.0.36", + "strsim 0.10.0", + "syn 1.0.109", +] + +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.86", + "quote 1.0.36", + "strsim 0.11.1", + "syn 2.0.72", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core 0.14.4", "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core 0.20.10", + "quote 1.0.36", + "syn 2.0.72", +] + [[package]] name = "dashmap" version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -1747,14 +1910,27 @@ dependencies = [ [[package]] name = "derive_more" -version = "1.0.0-beta.6" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7abbfc297053be59290e3152f8cbcd52c8642e0728b69ee187d991d4c1af08d" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ - "derive_more-impl", -] - -[[package]] + "convert_case 0.4.0", + "proc-macro2 1.0.86", + "quote 1.0.36", + "rustc_version", + "syn 2.0.72", +] + +[[package]] +name = "derive_more" +version = "1.0.0-beta.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7abbfc297053be59290e3152f8cbcd52c8642e0728b69ee187d991d4c1af08d" +dependencies = [ + "derive_more-impl", +] + +[[package]] name = "derive_more-impl" version = "1.0.0-beta.6" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1799,6 +1975,12 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "downcast-rs" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + [[package]] name = "dtoa" version = "1.0.9" @@ -1861,6 +2043,21 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ed25519-zebra" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "hashbrown 0.14.5", + "hex", + "rand_core 0.6.4", + "sha2 0.10.8", + "zeroize", +] + [[package]] name = "either" version = "1.9.0" @@ -1931,7 +2128,7 @@ version = "0.8.33" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -2005,7 +2202,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "home", "windows-sys 0.48.0", ] @@ -2054,6 +2251,16 @@ dependencies = [ "uint", ] +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "pin-project-lite", +] + [[package]] name = "event-listener" version = "5.3.1" @@ -2065,6 +2272,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -2098,27 +2315,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" dependencies = [ "byteorder", - "ff_derive_ce", "hex", "rand 0.4.6", "serde", ] -[[package]] -name = "ff_derive_ce" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20" -dependencies = [ - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "proc-macro2 1.0.86", - "quote 1.0.36", - "serde", - "syn 1.0.109", -] - [[package]] name = "fiat-crypto" version = "0.2.3" @@ -2211,9 +2412,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -2228,19 +2429,43 @@ dependencies = [ "num", ] +[[package]] +name = "frame-metadata" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "878babb0b136e731cc77ec2fd883ff02745ff21e6fb662729953d44923df009c" +dependencies = [ + "cfg-if", + "parity-scale-codec", + "scale-info", +] + +[[package]] +name = "frame-metadata" +version = "16.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cf1549fba25a6fcac22785b61698317d958e96cac72a59102ea45b9ae64692" +dependencies = [ + "cfg-if", + "parity-scale-codec", + "scale-info", + "serde", +] + [[package]] name = "franklin-crypto" -version = "0.1.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "178bca54fc449a6f4cb45321ed9d769353143ac7ef314ea310f3a0c61bed2da2" +checksum = "971289216ea5c91872e5e0bb6989214b537bbce375d09fabea5c3ccfe031b204" dependencies = [ "arr_macro", - "bellman_ce", "bit-vec", "blake2 0.9.2", "blake2-rfc_bellman_edition", "blake2s_simd", + "boojum", "byteorder", + "derivative", "digest 0.9.0", "hex", "indexmap 1.9.3", @@ -2257,6 +2482,7 @@ dependencies = [ "smallvec", "splitmut", "tiny-keccak 1.5.0", + "zksync_bellman", ] [[package]] @@ -2285,9 
+2511,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -2300,9 +2526,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -2310,15 +2536,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -2339,15 +2565,28 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-lite" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -2356,15 +2595,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" @@ -2378,9 +2617,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures 0.1.31", 
"futures-channel", @@ -2412,7 +2651,7 @@ version = "0.1.0" dependencies = [ "anyhow", "clap 4.4.6", - "futures 0.3.28", + "futures 0.3.30", "serde", "serde_json", "serde_yaml", @@ -2436,13 +2675,23 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom_or_panic" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" +dependencies = [ + "rand 0.8.5", + "rand_core 0.6.4", +] + [[package]] name = "ghash" version = "0.5.0" @@ -2593,7 +2842,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" dependencies = [ "dashmap", - "futures 0.3.28", + "futures 0.3.30", "futures-timer", "no-std-compat", "nonzero_ext", @@ -2700,6 +2949,7 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash 0.8.7", "allocator-api2", + "serde", ] [[package]] @@ -2747,6 +2997,12 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + [[package]] name = "hex" version = "0.4.3" @@ -2759,7 +3015,17 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" dependencies = [ - "hmac", + "hmac 0.12.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac", + "digest 0.9.0", ] [[package]] @@ -2771,6 +3037,17 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array", + "hmac 0.8.1", +] + [[package]] name = "home" version = "0.5.5" @@ -2904,6 +3181,22 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.9", + "hyper 0.14.29", + "log", + "rustls 0.21.12", + "rustls-native-certs 0.6.3", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.2" @@ -2915,10 +3208,10 @@ dependencies = [ "hyper 1.3.1", "hyper-util", "log", - "rustls", + "rustls 0.23.10", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tower-service", ] @@ -3021,9 +3314,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = 
"634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -3101,6 +3394,12 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "indexmap-nostd" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e04e2fd2b8188ea827b32ef11de88377086d690286ab35747ef7f9bf3ccb590" + [[package]] name = "inout" version = "0.1.3" @@ -3124,6 +3423,15 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -3201,24 +3509,57 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonrpsee" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9579d0ca9fb30da026bac2f0f7d9576ec93489aeb7cd4971dd5b4617d82c79b2" +dependencies = [ + "jsonrpsee-client-transport 0.21.0", + "jsonrpsee-core 0.21.0", + "jsonrpsee-http-client 0.21.0", + "jsonrpsee-types 0.21.0", +] + [[package]] name = "jsonrpsee" version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" dependencies = [ - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-http-client", + "jsonrpsee-client-transport 0.23.2", + "jsonrpsee-core 0.23.2", + "jsonrpsee-http-client 0.23.2", "jsonrpsee-proc-macros", "jsonrpsee-server", - "jsonrpsee-types", + "jsonrpsee-types 0.23.2", "jsonrpsee-wasm-client", "jsonrpsee-ws-client", "tokio", "tracing", ] +[[package]] +name = "jsonrpsee-client-transport" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9f9ed46590a8d5681975f126e22531698211b926129a40a2db47cbca429220" +dependencies = [ + "futures-util", + "http 0.2.9", + "jsonrpsee-core 0.21.0", + "pin-project", + "rustls-native-certs 0.7.0", + "rustls-pki-types", + "soketto 0.7.1", + "thiserror", + "tokio", + "tokio-rustls 0.25.0", + "tokio-util", + "tracing", + "url", +] + [[package]] name = "jsonrpsee-client-transport" version = "0.23.2" @@ -3230,20 +3571,44 @@ dependencies = [ "futures-util", "gloo-net", "http 1.1.0", - "jsonrpsee-core", + "jsonrpsee-core 0.23.2", "pin-project", - "rustls", + "rustls 0.23.10", "rustls-pki-types", "rustls-platform-verifier", - "soketto", + "soketto 0.8.0", "thiserror", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tokio-util", "tracing", "url", ] +[[package]] +name = "jsonrpsee-core" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "776d009e2f591b78c038e0d053a796f94575d66ca4e77dd84bfc5e81419e436c" +dependencies = [ + "anyhow", + "async-lock", + "async-trait", + "beef", + "futures-timer", + "futures-util", + "hyper 0.14.29", + "jsonrpsee-types 0.21.0", + "pin-project", + "rustc-hash", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "jsonrpsee-core" version = "0.23.2" @@ -3259,7 +3624,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "jsonrpsee-types", + "jsonrpsee-types 0.23.2", "parking_lot", "pin-project", "rand 0.8.5", @@ -3273,6 +3638,26 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "jsonrpsee-http-client" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" +dependencies = [ + "async-trait", + "hyper 0.14.29", + "hyper-rustls 0.24.2", + "jsonrpsee-core 0.21.0", + "jsonrpsee-types 0.21.0", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tracing", + "url", +] + [[package]] name = "jsonrpsee-http-client" version = "0.23.2" @@ -3283,11 +3668,11 @@ dependencies = [ "base64 0.22.1", "http-body 1.0.0", "hyper 1.3.1", - "hyper-rustls", + "hyper-rustls 0.27.2", "hyper-util", - "jsonrpsee-core", - "jsonrpsee-types", - "rustls", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", + "rustls 0.23.10", "rustls-platform-verifier", "serde", "serde_json", @@ -3324,13 +3709,13 @@ dependencies = [ "http-body-util", "hyper 1.3.1", "hyper-util", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", "pin-project", "route-recognizer", "serde", "serde_json", - "soketto", + "soketto 0.8.0", "thiserror", "tokio", "tokio-stream", @@ -3339,6 +3724,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "jsonrpsee-types" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3266dfb045c9174b24c77c2dfe0084914bb23a6b2597d70c9dc6018392e1cd1b" +dependencies = [ + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "jsonrpsee-types" version = "0.23.2" @@ -3358,9 +3756,9 @@ version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4727ac037f834c6f04c0912cada7532dbddb54e92fbc64e33d6cb8c24af313c9" dependencies = [ - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-client-transport 0.23.2", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", ] [[package]] @@ -3370,9 +3768,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" dependencies = [ "http 1.1.0", - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-client-transport 0.23.2", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", "url", ] @@ -3397,7 +3795,7 @@ version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", "sha2 0.10.8", @@ -3409,7 +3807,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa 0.16.9", "elliptic-curve 0.13.8", "once_cell", @@ -3459,7 +3857,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -3485,6 +3883,54 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64 0.13.1", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "libsqlite3-sys" version = "0.30.1" @@ -3526,7 +3972,7 @@ dependencies = [ "anyhow", "async-trait", "envy", - "futures 0.3.28", + "futures 0.3.30", "hex", "num", "once_cell", @@ -3604,6 +4050,9 @@ name = "lru" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" +dependencies = [ + "hashbrown 0.14.5", +] [[package]] name = "lz4-sys" @@ -3645,19 +4094,13 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "md-5" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "digest 0.10.7", ] @@ -3667,15 +4110,6 @@ version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] - [[package]] name = "merkle_tree_consistency_checker" version = "0.1.0" @@ -3691,6 +4125,18 @@ dependencies = [ "zksync_vlog", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "miette" version = "5.10.0" @@ -3736,8 +4182,8 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23e0b72e7c9042467008b10279fc732326bd605459ae03bda88825909dd19b56" dependencies = [ - "crossbeam-channel 0.5.13", - "crossbeam-utils 0.8.20", + "crossbeam-channel", + "crossbeam-utils", "dashmap", "skeptic", "smallvec", @@ -3778,6 +4224,23 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" +[[package]] +name = "multer" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http 1.1.0", + "httparse", + "memchr", + "mime", + "spin", + "version_check", +] 
+ [[package]] name = "multimap" version = "0.8.3" @@ -3809,7 +4272,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "libc", ] @@ -3819,6 +4282,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" +[[package]] +name = "no-std-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" + [[package]] name = "nodrop" version = "0.1.14" @@ -4069,7 +4538,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "libc", "once_cell", @@ -4248,24 +4717,11 @@ dependencies = [ "sha2 0.10.8", ] -[[package]] -name = "pairing_ce" -version = "0.28.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843b5b6fb63f00460f611dbc87a50bbbb745f0dfe5cbf67ca89299c79098640e" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - [[package]] name = "parity-scale-codec" -version = "3.6.5" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -4277,11 +4733,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.5" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.36", "syn 1.0.109", @@ -4309,7 +4765,7 @@ version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall", "smallvec", @@ -4322,6 +4778,15 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -4349,9 +4814,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" @@ -4440,6 +4905,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + [[package]] name = "pkcs1" version = "0.7.5" @@ -4505,6 +4981,21 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "polling" +version = "3.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi 0.4.0", + "pin-project-lite", + "rustix", + "tracing", + "windows-sys 0.59.0", +] + [[package]] name = "poly1305" version = "0.8.0" @@ -4522,7 +5013,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "opaque-debug", "universal-hash", @@ -4579,6 +5070,7 @@ dependencies = [ "impl-codec", "impl-rlp", "impl-serde", + "scale-info", "uint", ] @@ -4838,7 +5330,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" dependencies = [ - "crossbeam-utils 0.8.20", + "crossbeam-utils", "libc", "mach", "once_cell", @@ -4979,8 +5471,8 @@ version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-deque 0.8.5", - "crossbeam-utils 0.8.20", + "crossbeam-deque", + "crossbeam-utils", ] [[package]] @@ -5003,13 +5495,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.7", "regex-syntax 0.8.2", ] @@ -5024,9 +5516,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", @@ -5109,7 +5601,7 @@ dependencies = [ "http-body 1.0.0", "http-body-util", "hyper 1.3.1", - "hyper-rustls", + "hyper-rustls 0.27.2", "hyper-tls 0.6.0", "hyper-util", "ipnet", @@ -5121,7 +5613,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile", + "rustls-pemfile 2.0.0", "serde", "serde_json", "serde_urlencoded", @@ -5156,15 +5648,18 @@ dependencies = [ [[package]] name = "rescue_poseidon" -version = "0.4.1" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ada2124f92cf32b813e50f6f7d9e92f05addc321edb8b68f9b4e2bb6e0d5af8b" +checksum = "82900c877a0ba5362ac5756efbd82c5b795dc509011c1253e2389d8708f1389d" dependencies = [ "addchain", "arrayvec 0.7.4", "blake2 0.10.6", "byteorder", + "derivative", "franklin-crypto", + "lazy_static", + "log", "num-bigint 0.3.3", "num-integer", "num-iter", @@ -5173,6 +5668,7 @@ dependencies = [ "serde", "sha3 0.9.1", "smallvec", + "typemap_rev", ] [[package]] @@ -5182,7 +5678,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ "crypto-bigint 0.4.9", - "hmac", + "hmac 0.12.1", "zeroize", ] @@ -5192,7 +5688,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" dependencies = [ - "hmac", + "hmac 0.12.1", "subtle", ] @@ -5341,6 +5837,32 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + +[[package]] +name = "rustls" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki 0.102.4", + "subtle", + "zeroize", +] + [[package]] name = "rustls" version = "0.23.10" @@ -5352,11 +5874,23 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.102.4", "subtle", "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework", +] + [[package]] name = "rustls-native-certs" version = "0.7.0" @@ -5364,12 +5898,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 2.0.0", "rustls-pki-types", "schannel", "security-framework", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.5", +] + [[package]] name = "rustls-pemfile" version = "2.0.0" @@ -5397,10 +5940,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls", - "rustls-native-certs", + "rustls 0.23.10", + "rustls-native-certs 0.7.0", "rustls-platform-verifier-android", - "rustls-webpki", + "rustls-webpki 0.102.4", "security-framework", "security-framework-sys", "webpki-roots", @@ -5413,6 +5956,16 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.102.4" @@ -5431,6 +5984,17 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +[[package]] +name = "ruzstd" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c4eb8a81997cf040a091d1f7e1938aeab6749d3a0dfa73af43cdc32393483d" +dependencies = [ + "byteorder", + "derive_more 0.99.18", + "twox-hash", +] + [[package]] name = "ryu" 
version = "1.0.15" @@ -5446,6 +6010,132 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scale-bits" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "036575c29af9b6e4866ffb7fa055dbf623fe7a9cc159b33786de6013a6969d89" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", +] + +[[package]] +name = "scale-decode" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7caaf753f8ed1ab4752c6afb20174f03598c664724e0e32628e161c21000ff76" +dependencies = [ + "derive_more 0.99.18", + "parity-scale-codec", + "primitive-types", + "scale-bits", + "scale-decode-derive", + "scale-info", + "smallvec", +] + +[[package]] +name = "scale-decode-derive" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3475108a1b62c7efd1b5c65974f30109a598b2f45f23c9ae030acb9686966db" +dependencies = [ + "darling 0.14.4", + "proc-macro-crate 1.3.1", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "scale-encode" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d70cb4b29360105483fac1ed567ff95d65224a14dd275b6303ed0a654c78de5" +dependencies = [ + "derive_more 0.99.18", + "parity-scale-codec", + "primitive-types", + "scale-bits", + "scale-encode-derive", + "scale-info", + "smallvec", +] + +[[package]] +name = "scale-encode-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "995491f110efdc6bea96d6a746140e32bfceb4ea47510750a5467295a4707a25" +dependencies = [ + "darling 0.14.4", + "proc-macro-crate 1.3.1", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "scale-info" +version = "2.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" +dependencies = [ + "bitvec", + "cfg-if", + "derive_more 0.99.18", + "parity-scale-codec", + "scale-info-derive", + "serde", +] + +[[package]] +name = "scale-info-derive" +version = "2.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" +dependencies = [ + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "scale-typegen" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00860983481ac590ac87972062909bef0d6a658013b592ccc0f2feb272feab11" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.36", + "scale-info", + "syn 2.0.72", + "thiserror", +] + +[[package]] +name = "scale-value" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58223c7691bf0bd46b43c9aea6f0472d1067f378d574180232358d7c6e0a8089" +dependencies = [ + "base58", + "blake2 0.10.6", + "derive_more 0.99.18", + "either", + "frame-metadata 15.1.0", + "parity-scale-codec", + "scale-bits", + "scale-decode", + "scale-encode", + "scale-info", + "serde", + "yap", +] + [[package]] name = "schannel" version = "0.1.22" @@ -5455,12 +6145,41 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "schnorrkel" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" +dependencies = [ + "aead", + "arrayref", + "arrayvec 0.7.4", + "curve25519-dalek", + 
"getrandom_or_panic", + "merlin", + "rand_core 0.6.4", + "serde_bytes", + "sha2 0.10.8", + "subtle", + "zeroize", +] + [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "seahash" version = "4.1.0" @@ -5502,6 +6221,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" dependencies = [ "secp256k1-sys", + "serde", ] [[package]] @@ -5546,11 +6266,22 @@ dependencies = [ "libc", ] +[[package]] +name = "selector_generator" +version = "0.1.0" +dependencies = [ + "clap 4.4.6", + "glob", + "serde", + "serde_json", + "sha3 0.10.8", +] + [[package]] name = "semver" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -5677,9 +6408,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -5694,11 +6425,20 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_bytes" +version = "0.11.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" +dependencies = [ + "serde", +] + [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -5707,11 +6447,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -5756,7 +6497,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2 1.0.86", "quote 1.0.36", "syn 1.0.109", @@ -5775,13 +6516,26 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + [[package]] name = "sha1" version = "0.10.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5793,7 +6547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -5805,7 +6559,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5816,7 +6570,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eca2daa77078f4ddff27e75c4bf59e4c2697525f56dbb3c842d34a5d1f2b04a2" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5921,6 +6675,12 @@ dependencies = [ "time", ] +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + [[package]] name = "sized-chunks" version = "0.6.5" @@ -5964,12 +6724,120 @@ dependencies = [ "serde", ] +[[package]] +name = "smol" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" +dependencies = [ + "async-channel", + "async-executor", + "async-fs", + "async-io", + "async-lock", + "async-net", + "async-process", + "blocking", + "futures-lite", +] + +[[package]] +name = "smoldot" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d1eaa97d77be4d026a1e7ffad1bb3b78448763b357ea6f8188d3e6f736a9b9" +dependencies = [ + "arrayvec 0.7.4", + "async-lock", + "atomic-take", + "base64 0.21.5", + "bip39", + "blake2-rfc", + "bs58", + "chacha20", + "crossbeam-queue", + "derive_more 0.99.18", + "ed25519-zebra", + "either", + "event-listener 4.0.3", + "fnv", + "futures-lite", + "futures-util", + "hashbrown 0.14.5", + "hex", + "hmac 0.12.1", + "itertools 0.12.0", + "libm", + "libsecp256k1", + "merlin", + "no-std-net", + "nom", + "num-bigint 0.4.6", + "num-rational", + "num-traits", + "pbkdf2", + "pin-project", + "poly1305", + "rand 0.8.5", + "rand_chacha", + "ruzstd", + "schnorrkel", + "serde", + "serde_json", + "sha2 0.10.8", + "sha3 0.10.8", + "siphasher", + "slab", + "smallvec", + "soketto 0.7.1", + "twox-hash", + "wasmi", + "x25519-dalek", + "zeroize", +] + +[[package]] +name = "smoldot-light" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5496f2d116b7019a526b1039ec2247dd172b8670633b1a64a614c9ea12c9d8c7" +dependencies = [ + "async-channel", + "async-lock", + "base64 0.21.5", + "blake2-rfc", + "derive_more 0.99.18", + "either", + "event-listener 4.0.3", + "fnv", + "futures-channel", + "futures-lite", + "futures-util", + "hashbrown 0.14.5", + "hex", + "itertools 0.12.0", + "log", + "lru", + "no-std-net", + "parking_lot", + "pin-project", + "rand 0.8.5", + "rand_chacha", + "serde", + "serde_json", + "siphasher", + "slab", + "smol", + "smoldot", + "zeroize", +] + [[package]] name = "snapshots_creator" version = "0.1.0" dependencies = [ "anyhow", - "futures 0.3.28", + "futures 0.3.30", "rand 0.8.5", "structopt", "tokio", @@ -6010,6 +6878,21 @@ 
dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "soketto" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +dependencies = [ + "base64 0.13.1", + "bytes", + "futures 0.3.30", + "httparse", + "log", + "rand 0.8.5", + "sha-1", +] + [[package]] name = "soketto" version = "0.8.0" @@ -6018,7 +6901,7 @@ checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ "base64 0.22.1", "bytes", - "futures 0.3.28", + "futures 0.3.30", "http 1.1.0", "httparse", "log", @@ -6026,6 +6909,20 @@ dependencies = [ "sha1", ] +[[package]] +name = "sp-core-hashing" +version = "15.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0f4990add7b2cefdeca883c0efa99bb4d912cb2196120e1500c0cc099553b0" +dependencies = [ + "blake2b_simd", + "byteorder", + "digest 0.10.7", + "sha2 0.10.8", + "sha3 0.10.8", + "twox-hash", +] + [[package]] name = "spin" version = "0.9.8" @@ -6097,9 +6994,9 @@ dependencies = [ "bytes", "chrono", "crc", - "crossbeam-queue 0.3.11", + "crossbeam-queue", "either", - "event-listener", + "event-listener 5.3.1", "futures-channel", "futures-core", "futures-intrusive", @@ -6192,7 +7089,7 @@ dependencies = [ "generic-array", "hex", "hkdf", - "hmac", + "hmac 0.12.1", "itoa", "log", "md-5", @@ -6234,7 +7131,7 @@ dependencies = [ "futures-util", "hex", "hkdf", - "hmac", + "hmac 0.12.1", "home", "ipnetwork", "itoa", @@ -6315,6 +7212,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + [[package]] name = "structopt" version = "0.3.26" @@ -6367,6 +7270,129 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +[[package]] +name = "subxt" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3323d5c27898b139d043dc1ee971f602f937b99354ee33ee933bd90e0009fbd" +dependencies = [ + "async-trait", + "base58", + "blake2 0.10.6", + "derivative", + "either", + "frame-metadata 16.0.0", + "futures 0.3.30", + "hex", + "impl-serde", + "instant", + "jsonrpsee 0.21.0", + "parity-scale-codec", + "primitive-types", + "scale-bits", + "scale-decode", + "scale-encode", + "scale-info", + "scale-value", + "serde", + "serde_json", + "sp-core-hashing", + "subxt-lightclient", + "subxt-macro", + "subxt-metadata", + "thiserror", + "tokio-util", + "tracing", + "url", +] + +[[package]] +name = "subxt-codegen" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d0e58c3f88651cff26aa52bae0a0a85f806a2e923a20eb438c16474990743ea" +dependencies = [ + "frame-metadata 16.0.0", + "heck 0.4.1", + "hex", + "jsonrpsee 0.21.0", + "parity-scale-codec", + "proc-macro2 1.0.86", + "quote 1.0.36", + "scale-info", + "scale-typegen", + "subxt-metadata", + "syn 2.0.72", + "thiserror", + "tokio", +] + +[[package]] +name = "subxt-lightclient" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecec7066ba7bc0c3608fcd1d0c7d9584390990cd06095b6ae4f114f74c4b8550" +dependencies = [ + "futures 0.3.30", + "futures-util", + 
"serde", + "serde_json", + "smoldot-light", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "subxt-macro" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "365251668613323064803427af8c7c7bc366cd8b28e33639640757669dafebd5" +dependencies = [ + "darling 0.20.10", + "parity-scale-codec", + "proc-macro-error", + "quote 1.0.36", + "scale-typegen", + "subxt-codegen", + "syn 2.0.72", +] + +[[package]] +name = "subxt-metadata" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c02aca8d39a1f6c55fff3a8fd81557d30a610fedc1cef03f889a81bc0f8f0b52" +dependencies = [ + "frame-metadata 16.0.0", + "parity-scale-codec", + "scale-info", + "sp-core-hashing", + "thiserror", +] + +[[package]] +name = "subxt-signer" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f88a76a5d114bfae2f6f9cc1491c46173ecc3fb2b9e53948eb3c8d43d4b43ab5" +dependencies = [ + "bip39", + "hex", + "hmac 0.12.1", + "parity-scale-codec", + "pbkdf2", + "regex", + "schnorrkel", + "secrecy", + "sha2 0.10.8", + "sp-core-hashing", + "subxt", + "thiserror", + "zeroize", +] + [[package]] name = "syn" version = "0.15.44" @@ -6477,7 +7503,7 @@ version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "rustix", "windows-sys 0.52.0", @@ -6550,18 +7576,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -6574,7 +7600,7 @@ version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -6691,7 +7717,7 @@ dependencies = [ "pin-project-lite", "thiserror", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", ] [[package]] @@ -6733,13 +7759,34 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls", + "rustls 0.23.10", "rustls-pki-types", 
"tokio", ] @@ -6758,9 +7805,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -6768,7 +7815,6 @@ dependencies = [ "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] @@ -7021,6 +8067,23 @@ dependencies = [ "termcolor", ] +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if", + "digest 0.10.7", + "static_assertions", +] + +[[package]] +name = "typemap_rev" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74b08b0c1257381af16a5c3605254d529d3e7e109f3c62befc5d168968192998" + [[package]] name = "typenum" version = "1.17.0" @@ -7161,9 +8224,9 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -7271,25 +8334,18 @@ dependencies = [ name = "vm-benchmark" version = "0.1.0" dependencies = [ + "assert_matches", "criterion", "iai", + "once_cell", "rand 0.8.5", "tokio", "vise", + "zksync_contracts", + "zksync_multivm", "zksync_types", + "zksync_utils", "zksync_vlog", - "zksync_vm_benchmark_harness", -] - -[[package]] -name = "vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d" -dependencies = [ - "enum_dispatch", - "primitive-types", - "zk_evm_abstractions 0.150.4", - "zkevm_opcode_defs 0.150.4", ] [[package]] @@ -7335,7 +8391,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] @@ -7360,7 +8416,7 @@ version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -7408,6 +8464,46 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmi" +version = "0.31.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8281d1d660cdf54c76a3efa9ddd0c270cada1383a995db3ccb43d166456c7" +dependencies = [ + "smallvec", + "spin", + "wasmi_arena", + "wasmi_core", + "wasmparser-nostd", +] + +[[package]] +name = "wasmi_arena" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "104a7f73be44570cac297b3035d76b169d6599637631cf37a1703326a0727073" + +[[package]] +name = "wasmi_core" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf1a7db34bff95b85c261002720c00c3a6168256dcb93041d3fa2054d19856a" +dependencies = [ + "downcast-rs", + "libm", + "num-traits", + "paste", +] + +[[package]] +name = "wasmparser-nostd" +version = "0.100.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5a015fe95f3504a94bb1462c717aae75253e39b9dd6c3fb1062c934535c64aa" +dependencies = [ + "indexmap-nostd", +] + [[package]] name = "web-sys" version = "0.3.64" @@ -7526,6 +8622,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -7719,7 +8824,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -7729,7 +8834,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -7742,6 +8847,18 @@ dependencies = [ "tap", ] +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core 0.6.4", + "serde", + "zeroize", +] + [[package]] name = "yaml-rust" version = "0.4.5" @@ -7757,6 +8874,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "yap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff4524214bc4629eba08d78ceb1d6507070cc0bcbbed23af74e19e6e924a24cf" + [[package]] name = "zerocopy" version = "0.7.31" @@ -7865,9 +8988,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2dbb0ed38d61fbd04bd7575755924d1303e129c04c909abba7f5bfcc6260bcf" +checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" dependencies = [ "anyhow", "lazy_static", @@ -7875,7 +8998,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.4", + "zk_evm_abstractions 0.150.5", ] [[package]] @@ -7906,15 +9029,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31460aacfe65b39ac484a2a2e0bbb02baf141f65264bf48e1e4f59ab375fe933" +checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", ] [[package]] @@ -7963,13 +9086,12 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abdfaa95dfe0878fda219dd17a6cc8c28711e2067785910c0e06d3ffdca78629" +checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" dependencies = [ "arrayvec 0.7.4", "boojum", - "cs_derive", "derivative", "hex", "itertools 0.10.5", @@ -7978,7 +9100,8 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", + 
"zksync_cs_derive", ] [[package]] @@ -8025,9 +9148,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7c5c7b4481a646f8696b08cee64a8dec097509a6378d18242f81022f327f1e" +checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -8046,6 +9169,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "bigdecimal", "chrono", "hex", "rand 0.8.5", @@ -8081,6 +9205,29 @@ dependencies = [ "url", ] +[[package]] +name = "zksync_bellman" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ffa03efe9bdb137a4b36b97d1a74237e18c9ae42b755163d903a9d48c1a5d80" +dependencies = [ + "arrayvec 0.7.4", + "bit-vec", + "blake2s_simd", + "byteorder", + "cfg-if", + "crossbeam", + "futures 0.3.30", + "hex", + "lazy_static", + "num_cpus", + "rand 0.4.6", + "serde", + "smallvec", + "tiny-keccak 1.5.0", + "zksync_pairing", +] + [[package]] name = "zksync_block_reverter" version = "0.1.0" @@ -8088,7 +9235,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "serde", "tempfile", "test-casing", @@ -8110,7 +9257,6 @@ name = "zksync_circuit_breaker" version = "0.1.0" dependencies = [ "anyhow", - "assert_matches", "async-trait", "thiserror", "tokio", @@ -8125,10 +9271,10 @@ name = "zksync_commitment_generator" version = "0.1.0" dependencies = [ "anyhow", - "circuit_sequencer_api 0.140.0", - "circuit_sequencer_api 0.141.1", - "circuit_sequencer_api 0.150.4", - "futures 0.3.28", + "circuit_sequencer_api 0.140.3", + "circuit_sequencer_api 0.141.2", + "circuit_sequencer_api 0.150.5", + "futures 0.3.30", "itertools 0.10.5", "num_cpus", "rand 0.8.5", @@ -8139,7 +9285,7 @@ dependencies = [ "vise", "zk_evm 0.133.0", "zk_evm 0.141.0", - "zk_evm 0.150.4", + "zk_evm 0.150.5", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -8155,9 +9301,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" dependencies = [ "anyhow", "once_cell", @@ -8180,6 +9326,7 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "serde_json", "tracing", "url", "zksync_basic_types", @@ -8191,9 +9338,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22e3bfe96fa30a57313e774a5e8c74ffee884abff57ecacc10e8832315ee8a2" +checksum = "45c409ae915056cf9cadd9304dbc8718fa38edfcb346d06e5b3582dcd2489ef9" dependencies = [ "anyhow", "async-trait", @@ -8213,9 +9360,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" +checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" dependencies = [ "anyhow", "blst", @@ -8226,7 +9373,6 @@ dependencies = [ "k256 0.13.3", "num-bigint 0.4.6", "num-traits", - "pairing_ce", "rand 0.4.6", "rand 0.8.5", "sha3 0.10.8", @@ -8237,13 +9383,14 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" 
-version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7fcde1275970a6b8a33ea2ade5cc994d6392f95509ce374e0e7a26cde4cd6db" +checksum = "6b018b8a76fc2cbecb51683ce97532501c45d44cbc8bb856d1956e5998259335" dependencies = [ "anyhow", "async-trait", "rand 0.8.5", + "semver", "tracing", "vise", "zksync_concurrency", @@ -8258,9 +9405,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ee48bee7dae8adb2769c7315adde1780832d05ecb6a77c08cdda53a315992a" +checksum = "f5bb2988e41af3083cebfc11f47f2615adae8d829bf9237aa084dede9629a687" dependencies = [ "anyhow", "async-trait", @@ -8275,11 +9422,12 @@ dependencies = [ "pin-project", "prost 0.12.1", "rand 0.8.5", + "semver", "snow", "thiserror", "tls-listener", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tracing", "vise", "zksync_concurrency", @@ -8293,9 +9441,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" +checksum = "0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" dependencies = [ "anyhow", "bit-vec", @@ -8315,9 +9463,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" +checksum = "7b9dbcb923fa201af03f49f70c11a923b416915d2ddf8b2de3a2e861f22898a4" dependencies = [ "anyhow", "async-trait", @@ -8335,9 +9483,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" dependencies = [ "anyhow", "rand 0.8.5", @@ -8362,7 +9510,6 @@ dependencies = [ "zksync_contracts", "zksync_dal", "zksync_eth_client", - "zksync_eth_sender", "zksync_health_check", "zksync_l1_contract_interface", "zksync_node_genesis", @@ -8394,7 +9541,7 @@ version = "0.1.0" dependencies = [ "anyhow", "ctrlc", - "futures 0.3.28", + "futures 0.3.30", "structopt", "tokio", "tracing", @@ -8429,7 +9576,6 @@ dependencies = [ "zksync_config", "zksync_contracts", "zksync_dal", - "zksync_env_config", "zksync_queued_job_processor", "zksync_types", "zksync_utils", @@ -8457,9 +9603,7 @@ dependencies = [ "serde_yaml", "tokio", "zksync_config", - "zksync_dal", "zksync_env_config", - "zksync_node_genesis", "zksync_protobuf", "zksync_protobuf_config", ] @@ -8481,6 +9625,18 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_cs_derive" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5939e2df4288c263c706ff18ac718e984149223ad4289d6d957d767dcfc04c81" +dependencies = [ + "proc-macro-error", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + [[package]] name = "zksync_da_client" version = "0.1.0" @@ -8488,8 +9644,33 @@ dependencies = [ "anyhow", "async-trait", "serde", +] + +[[package]] +name = "zksync_da_clients" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "base58", + "blake2 0.10.6", + "blake2b_simd", + "flate2", + 
"futures 0.3.30", + "hex", + "jsonrpsee 0.23.2", + "parity-scale-codec", + "scale-encode", + "serde", + "serde_json", + "subxt-metadata", + "subxt-signer", + "tokio", "tracing", "zksync_config", + "zksync_da_client", + "zksync_env_config", + "zksync_object_store", "zksync_types", ] @@ -8499,7 +9680,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "futures 0.3.28", + "futures 0.3.30", "rand 0.8.5", "tokio", "tracing", @@ -8561,24 +9742,6 @@ dependencies = [ "tracing", "vise", "zksync_basic_types", - "zksync_health_check", -] - -[[package]] -name = "zksync_default_da_clients" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "flate2", - "serde", - "tracing", - "zksync_config", - "zksync_da_client", - "zksync_env_config", - "zksync_node_framework", - "zksync_object_store", - "zksync_types", ] [[package]] @@ -8600,7 +9763,7 @@ dependencies = [ "assert_matches", "async-trait", "hex", - "jsonrpsee", + "jsonrpsee 0.23.2", "pretty_assertions", "rlp", "serde_json", @@ -8676,16 +9839,15 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.21.0" +version = "24.24.0" dependencies = [ "anyhow", "assert_matches", "async-trait", "clap 4.4.6", "envy", - "futures 0.3.28", + "futures 0.3.30", "rustc_version", - "semver", "serde", "serde_json", "tempfile", @@ -8754,25 +9916,54 @@ name = "zksync_external_proof_integration_api" version = "0.1.0" dependencies = [ "anyhow", + "async-trait", "axum", "bincode", + "thiserror", "tokio", "tracing", "vise", "zksync_basic_types", - "zksync_config", "zksync_dal", "zksync_object_store", "zksync_prover_interface", ] +[[package]] +name = "zksync_ff" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9524b06780b5e164e84b38840c7c428c739f051f35af6efc4d1285f629ceb88e" +dependencies = [ + "byteorder", + "hex", + "rand 0.4.6", + "serde", + "zksync_ff_derive", +] + +[[package]] +name = "zksync_ff_derive" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f91e58e75d65877f09f83bc3dca8f054847ae7ec4f3e64bfa610a557edd8e8e" +dependencies = [ + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "proc-macro2 1.0.86", + "quote 1.0.36", + "serde", + "syn 1.0.109", +] + [[package]] name = "zksync_health_check" version = "0.1.0" dependencies = [ "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "serde", "serde_json", "thiserror", @@ -8792,16 +9983,15 @@ dependencies = [ "vise", "zksync_config", "zksync_dal", - "zksync_prover_dal", "zksync_shared_metrics", "zksync_types", ] [[package]] name = "zksync_kzg" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9949f48ea1a9f9a0e73242d4d1e87e681095181827486b3fcc2cf93e5aa03280" +checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" dependencies = [ "boojum", "derivative", @@ -8811,7 +10001,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.4", + "zkevm_circuits 0.150.5", ] [[package]] @@ -8889,7 +10079,7 @@ dependencies = [ "assert_matches", "async-trait", "axum", - "futures 0.3.28", + "futures 0.3.30", "itertools 0.10.5", "once_cell", "reqwest 0.12.5", @@ -8931,11 +10121,12 @@ name = "zksync_multivm" version = "0.1.0" dependencies = [ "anyhow", - "circuit_sequencer_api 0.133.0", - "circuit_sequencer_api 0.140.0", - "circuit_sequencer_api 0.141.1", - "circuit_sequencer_api 0.142.0", - "circuit_sequencer_api 0.150.4", + "assert_matches", + 
"circuit_sequencer_api 0.133.1", + "circuit_sequencer_api 0.140.3", + "circuit_sequencer_api 0.141.2", + "circuit_sequencer_api 0.142.2", + "circuit_sequencer_api 0.150.5", "ethabi", "hex", "itertools 0.10.5", @@ -8945,18 +10136,18 @@ dependencies = [ "tokio", "tracing", "vise", - "vm2", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.4", + "zk_evm 0.150.5", "zksync_contracts", "zksync_eth_signer", "zksync_system_constants", "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm2", "zksync_vm_interface", ] @@ -8969,7 +10160,7 @@ dependencies = [ "async-trait", "axum", "chrono", - "futures 0.3.28", + "futures 0.3.30", "governor", "hex", "http 1.1.0", @@ -9008,6 +10199,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", + "zksync_vm_executor", "zksync_web3_decl", ] @@ -9017,8 +10209,10 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "hex", "rand 0.8.5", "secrecy", + "semver", "tempfile", "test-casing", "thiserror", @@ -9043,11 +10237,13 @@ dependencies = [ "zksync_node_sync", "zksync_node_test_utils", "zksync_protobuf", + "zksync_state", "zksync_state_keeper", "zksync_system_constants", "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_interface", "zksync_web3_decl", ] @@ -9061,7 +10257,6 @@ dependencies = [ "chrono", "serde", "serde_json", - "test-casing", "test-log", "tokio", "tracing", @@ -9088,7 +10283,6 @@ dependencies = [ "zksync_config", "zksync_dal", "zksync_eth_client", - "zksync_node_test_utils", "zksync_types", "zksync_utils", "zksync_web3_decl", @@ -9102,8 +10296,9 @@ dependencies = [ "assert_matches", "async-trait", "ctrlc", - "futures 0.3.28", + "futures 0.3.30", "pin-project-lite", + "semver", "thiserror", "tokio", "tracing", @@ -9118,10 +10313,10 @@ dependencies = [ "zksync_contract_verification_server", "zksync_contracts", "zksync_da_client", + "zksync_da_clients", "zksync_da_dispatcher", "zksync_dal", "zksync_db_connection", - "zksync_env_config", "zksync_eth_client", "zksync_eth_sender", "zksync_eth_watch", @@ -9140,8 +10335,6 @@ dependencies = [ "zksync_node_sync", "zksync_object_store", "zksync_proof_data_handler", - "zksync_protobuf_config", - "zksync_prover_dal", "zksync_queued_job_processor", "zksync_reorg_detector", "zksync_state", @@ -9151,6 +10344,7 @@ dependencies = [ "zksync_types", "zksync_utils", "zksync_vlog", + "zksync_vm_executor", "zksync_vm_runner", "zksync_web3_decl", ] @@ -9215,7 +10409,7 @@ dependencies = [ "assert_matches", "async-trait", "chrono", - "futures 0.3.28", + "futures 0.3.30", "once_cell", "serde", "serde_json", @@ -9237,7 +10431,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", - "zksync_vm_utils", + "zksync_vm_executor", "zksync_web3_decl", ] @@ -9280,6 +10474,19 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_pairing" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8412ae5574472fa567a097e183f9a01974b99dd0b5da3bfa1bbe6c57c579aa2" +dependencies = [ + "byteorder", + "cfg-if", + "rand 0.4.6", + "serde", + "zksync_ff", +] + [[package]] name = "zksync_proof_data_handler" version = "0.1.0" @@ -9300,15 +10507,14 @@ dependencies = [ "zksync_multivm", "zksync_object_store", "zksync_prover_interface", - "zksync_tee_verifier", "zksync_types", ] [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" dependencies = [ "anyhow", "bit-vec", @@ -9327,9 +10533,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" dependencies = [ "anyhow", "heck 0.5.0", @@ -9361,23 +10567,13 @@ dependencies = [ "zksync_types", ] -[[package]] -name = "zksync_prover_dal" -version = "0.1.0" -dependencies = [ - "sqlx", - "strum", - "zksync_basic_types", - "zksync_db_connection", -] - [[package]] name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.150.5", "serde", "serde_json", "serde_with", @@ -9427,7 +10623,7 @@ version = "0.1.0" dependencies = [ "anyhow", "clap 4.4.6", - "futures 0.3.28", + "futures 0.3.30", "serde_json", "tikv-jemallocator", "tokio", @@ -9438,7 +10634,7 @@ dependencies = [ "zksync_consensus_executor", "zksync_consensus_roles", "zksync_core_leftovers", - "zksync_default_da_clients", + "zksync_da_clients", "zksync_env_config", "zksync_eth_client", "zksync_metadata_calculator", @@ -9470,7 +10666,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "serde", "test-casing", "thiserror", @@ -9488,9 +10684,9 @@ dependencies = [ [[package]] name = "zksync_solidity_vk_codegen" -version = "0.1.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bac71750012656b207e8cdb67415823318909077d8c8e235111f0d2feeeeeda" +checksum = "b310ab8a21681270e73f177ddf7974cabb7a96f0624ab8b008fd6ee1f9b4f687" dependencies = [ "ethereum-types", "franklin-crypto", @@ -9536,7 +10732,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "hex", "itertools 0.10.5", "once_cell", @@ -9564,7 +10760,7 @@ dependencies = [ "zksync_test_account", "zksync_types", "zksync_utils", - "zksync_vm_utils", + "zksync_vm_executor", ] [[package]] @@ -9595,6 +10791,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "envy", "reqwest 0.12.5", "secp256k1", "serde", @@ -9623,13 +10820,10 @@ dependencies = [ "zksync_config", "zksync_contracts", "zksync_crypto_primitives", - "zksync_dal", - "zksync_db_connection", "zksync_merkle_tree", "zksync_multivm", "zksync_object_store", "zksync_prover_interface", - "zksync_queued_job_processor", "zksync_types", "zksync_utils", ] @@ -9650,7 +10844,7 @@ dependencies = [ "zksync_tee_verifier", "zksync_types", "zksync_utils", - "zksync_vm_utils", + "zksync_vm_executor", ] [[package]] @@ -9672,11 +10866,12 @@ name = "zksync_types" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "bigdecimal", "bincode", "blake2 0.10.6", "chrono", - "derive_more", + "derive_more 1.0.0-beta.6", "hex", "itertools 0.10.5", "num", @@ -9707,11 +10902,11 @@ name = "zksync_utils" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "bigdecimal", "bincode", - "futures 0.3.28", + "futures 0.3.30", "hex", - "itertools 0.10.5", "num", "once_cell", "rand 0.8.5", @@ -9752,16 +10947,38 @@ dependencies = [ ] [[package]] -name = "zksync_vm_benchmark_harness" +name = "zksync_vm2" version = "0.1.0" +source = 
"git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" dependencies = [ - "assert_matches", + "enum_dispatch", + "primitive-types", + "zk_evm_abstractions 0.150.5", + "zkevm_opcode_defs 0.150.5", + "zksync_vm2_interface", +] + +[[package]] +name = "zksync_vm2_interface" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +dependencies = [ + "primitive-types", +] + +[[package]] +name = "zksync_vm_executor" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", "once_cell", - "zk_evm 0.133.0", + "tokio", + "tracing", + "vise", "zksync_contracts", + "zksync_dal", "zksync_multivm", - "zksync_state", - "zksync_system_constants", "zksync_types", "zksync_utils", ] @@ -9770,8 +10987,12 @@ dependencies = [ name = "zksync_vm_interface" version = "0.1.0" dependencies = [ + "anyhow", + "assert_matches", + "async-trait", "hex", "serde", + "serde_json", "thiserror", "tracing", "zksync_contracts", @@ -9784,10 +11005,11 @@ name = "zksync_vm_runner" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "async-trait", "backon", "dashmap", - "futures 0.3.28", + "futures 0.3.30", "once_cell", "rand 0.8.5", "serde", @@ -9799,30 +11021,16 @@ dependencies = [ "zksync_contracts", "zksync_dal", "zksync_health_check", - "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", "zksync_object_store", "zksync_prover_interface", "zksync_state", - "zksync_state_keeper", "zksync_storage", "zksync_test_account", "zksync_types", "zksync_utils", - "zksync_vm_utils", -] - -[[package]] -name = "zksync_vm_utils" -version = "0.1.0" -dependencies = [ - "anyhow", - "tokio", - "tracing", - "zksync_contracts", - "zksync_dal", - "zksync_types", + "zksync_vm_executor", "zksync_vm_interface", ] @@ -9833,12 +11041,12 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.28", - "jsonrpsee", + "futures 0.3.30", + "jsonrpsee 0.23.2", "pin-project-lite", "rand 0.8.5", "rlp", - "rustls", + "rustls 0.23.10", "serde", "serde_json", "test-casing", diff --git a/Cargo.toml b/Cargo.toml index 6ee6ce79e490..5eb862f0bcb7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "core/bin/external_node", "core/bin/merkle_tree_consistency_checker", "core/bin/snapshots_creator", + "core/bin/selector_generator", "core/bin/system-constants-generator", "core/bin/verified_sources_fetcher", "core/bin/zksync_server", @@ -38,6 +39,7 @@ members = [ "core/node/base_token_adjuster", "core/node/external_proof_integration_api", "core/node/logs_bloom_backfill", + "core/node/da_clients", # Libraries "core/lib/db_connection", "core/lib/zksync_core_leftovers", @@ -50,7 +52,6 @@ members = [ "core/lib/dal", "core/lib/env_config", "core/lib/da_client", - "core/lib/default_da_clients", "core/lib/eth_client", "core/lib/eth_signer", "core/lib/l1_contract_interface", @@ -70,7 +71,7 @@ members = [ "core/lib/vlog", "core/lib/multivm", "core/lib/vm_interface", - "core/lib/vm_utils", + "core/lib/vm_executor", "core/lib/web3_decl", "core/lib/snapshots_applier", "core/lib/crypto_primitives", @@ -79,9 +80,6 @@ members = [ "core/tests/test_account", "core/tests/loadnext", "core/tests/vm-benchmark", - "core/tests/vm-benchmark/harness", - # Parts of prover workspace that are needed for Core workspace - "prover/crates/lib/prover_dal", ] resolver = "2" @@ -123,6 +121,7 @@ envy = "0.4" ethabi = "18.0.0" flate2 = "1.0.28" futures = "0.3" 
+glob = "0.3" google-cloud-auth = "0.16.0" google-cloud-storage = "0.20.0" governor = "0.4.2" @@ -199,6 +198,15 @@ trybuild = "1.0" vise = "0.2.0" vise-exporter = "0.2.0" +# DA clients' dependencies +# Avail +base58 = "0.2.0" +scale-encode = "0.5.0" +blake2b_simd = "1.0.2" +subxt-metadata = "0.34.0" +parity-scale-codec = { version = "3.6.9", default-features = false } +subxt-signer = { version = "0.34", default-features = false } + # Here and below: # We *always* pin the latest version of protocol to disallow accidental changes in the execution logic. # However, for the historical version of protocol crates, we have lax requirements. Otherwise, @@ -207,38 +215,36 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.4" } -crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.1.0" } -kzg = { package = "zksync_kzg", version = "=0.150.4" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.5" } +crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.30.1" } +kzg = { package = "zksync_kzg", version = "=0.150.5" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } -zk_evm_1_3_3 = { package = "zk_evm", version = "0.133.0" } -zk_evm_1_4_0 = { package = "zk_evm", version = "0.140.0" } -zk_evm_1_4_1 = { package = "zk_evm", version = "0.141.0" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } +zk_evm_1_3_3 = { package = "zk_evm", version = "0.133" } +zk_evm_1_4_0 = { package = "zk_evm", version = "0.140" } +zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } # New VM; pinned to a specific commit because of instability -vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "9a38900d7af9b1d72b47ce3be980e77c1239a61d" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "cd6136c42ec56856e0abcf2a98d1a9e120161482" } # Consensus dependencies. 
-zksync_concurrency = "=0.1.0-rc.11" -zksync_consensus_bft = "=0.1.0-rc.11" -zksync_consensus_crypto = "=0.1.0-rc.11" -zksync_consensus_executor = "=0.1.0-rc.11" -zksync_consensus_network = "=0.1.0-rc.11" -zksync_consensus_roles = "=0.1.0-rc.11" -zksync_consensus_storage = "=0.1.0-rc.11" -zksync_consensus_utils = "=0.1.0-rc.11" -zksync_protobuf = "=0.1.0-rc.11" -zksync_protobuf_build = "=0.1.0-rc.11" +zksync_concurrency = "=0.1.1" +zksync_consensus_bft = "=0.1.1" +zksync_consensus_crypto = "=0.1.1" +zksync_consensus_executor = "=0.1.1" +zksync_consensus_network = "=0.1.1" +zksync_consensus_roles = "=0.1.1" +zksync_consensus_storage = "=0.1.1" +zksync_consensus_utils = "=0.1.1" +zksync_protobuf = "=0.1.1" +zksync_protobuf_build = "=0.1.1" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } -zksync_prover_dal = { version = "0.1.0", path = "prover/crates/lib/prover_dal" } zksync_vlog = { version = "0.1.0", path = "core/lib/vlog" } zksync_vm_interface = { version = "0.1.0", path = "core/lib/vm_interface" } -zksync_vm_utils = { version = "0.1.0", path = "core/lib/vm_utils" } -zksync_vm_benchmark_harness = { version = "0.1.0", path = "core/tests/vm-benchmark/harness" } +zksync_vm_executor = { version = "0.1.0", path = "core/lib/vm_executor" } zksync_basic_types = { version = "0.1.0", path = "core/lib/basic_types" } zksync_circuit_breaker = { version = "0.1.0", path = "core/lib/circuit_breaker" } zksync_config = { version = "0.1.0", path = "core/lib/config" } @@ -250,7 +256,6 @@ zksync_db_connection = { version = "0.1.0", path = "core/lib/db_connection" } zksync_env_config = { version = "0.1.0", path = "core/lib/env_config" } zksync_eth_client = { version = "0.1.0", path = "core/lib/eth_client" } zksync_da_client = { version = "0.1.0", path = "core/lib/da_client" } -zksync_default_da_clients = { version = "0.1.0", path = "core/lib/default_da_clients" } zksync_eth_signer = { version = "0.1.0", path = "core/lib/eth_signer" } zksync_health_check = { version = "0.1.0", path = "core/lib/health_check" } zksync_l1_contract_interface = { version = "0.1.0", path = "core/lib/l1_contract_interface" } @@ -284,6 +289,7 @@ zksync_commitment_generator = { version = "0.1.0", path = "core/node/commitment_ zksync_house_keeper = { version = "0.1.0", path = "core/node/house_keeper" } zksync_node_genesis = { version = "0.1.0", path = "core/node/genesis" } zksync_da_dispatcher = { version = "0.1.0", path = "core/node/da_dispatcher" } +zksync_da_clients = { version = "0.1.0", path = "core/node/da_clients" } zksync_eth_sender = { version = "0.1.0", path = "core/node/eth_sender" } zksync_node_db_pruner = { version = "0.1.0", path = "core/node/db_pruner" } zksync_node_fee_model = { version = "0.1.0", path = "core/node/fee_model" } diff --git a/bin/zkt b/bin/zkt index 9447230486f7..4736401a29d6 100755 --- a/bin/zkt +++ b/bin/zkt @@ -3,6 +3,8 @@ cd $(dirname $0) if which zkup >/dev/null; then + cargo uninstall zk_inception + cargo uninstall zk_supervisor zkup -p .. 
--alias else echo zkup does not installed, please install it https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup diff --git a/chains/era/ZkStack.yaml b/chains/era/ZkStack.yaml index 8dbd49c02c67..306473ba93a8 100644 --- a/chains/era/ZkStack.yaml +++ b/chains/era/ZkStack.yaml @@ -5,6 +5,7 @@ prover_version: NoProofs configs: ./chains/era/configs/ rocks_db_path: ./chains/era/db/ external_node_config_path: ./chains/era/configs/external_node +artifacts_path: ./chains/era/artifacts/ l1_batch_commit_data_generator_mode: Rollup base_token: address: '0x0000000000000000000000000000000000000001' diff --git a/contracts b/contracts index 7ca5517510f2..3a1b5d4b94ff 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 7ca5517510f2534a2fc25b16c429fdd4a439b89d +Subproject commit 3a1b5d4b94ffb00f03d436a7db7e48589eb74d39 diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index cc0590a79d20..7d4381b09bef 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,69 @@ # Changelog +## [24.24.0](https://github.com/matter-labs/zksync-era/compare/core-v24.23.0...core-v24.24.0) (2024-09-05) + + +### Features + +* conditional cbt l1 updates ([#2748](https://github.com/matter-labs/zksync-era/issues/2748)) ([6d18061](https://github.com/matter-labs/zksync-era/commit/6d18061df4a18803d3c6377305ef711ce60317e1)) +* **eth-watch:** do not query events from earliest block ([#2810](https://github.com/matter-labs/zksync-era/issues/2810)) ([1da3f7e](https://github.com/matter-labs/zksync-era/commit/1da3f7ea1df94312e7c6818c17bf4109f888e547)) +* **genesis:** Validate genesis config against L1 ([#2786](https://github.com/matter-labs/zksync-era/issues/2786)) ([b2dd9a5](https://github.com/matter-labs/zksync-era/commit/b2dd9a5c08fecf0a878632b33a32a78aac11c065)) +* Integrate tracers and implement circuits tracer in vm2 ([#2653](https://github.com/matter-labs/zksync-era/issues/2653)) ([87b02e3](https://github.com/matter-labs/zksync-era/commit/87b02e3ab5c1f61d59dd0f0eefa9ec33a7b55488)) +* Move prover data to /home/popzxc/workspace/current/zksync-era/prover/data ([#2778](https://github.com/matter-labs/zksync-era/issues/2778)) ([62e4d46](https://github.com/matter-labs/zksync-era/commit/62e4d4619dde9d6bd9102f1410eea75b0e2051c5)) +* Remove prover db from house keeper ([#2795](https://github.com/matter-labs/zksync-era/issues/2795)) ([85b7346](https://github.com/matter-labs/zksync-era/commit/85b734664b4306e988da07005860a7ea0fb7d22d)) +* **vm-runner:** Implement batch data prefetching ([#2724](https://github.com/matter-labs/zksync-era/issues/2724)) ([d01840d](https://github.com/matter-labs/zksync-era/commit/d01840d5de2cb0f4bead8f1c384b24ba713e6a66)) +* **vm:** Extract batch executor to separate crate ([#2702](https://github.com/matter-labs/zksync-era/issues/2702)) ([b82dfa4](https://github.com/matter-labs/zksync-era/commit/b82dfa4d29fce107223c3638fe490b5cb0f28d8c)) +* **vm:** Simplify VM interface ([#2760](https://github.com/matter-labs/zksync-era/issues/2760)) ([c3bde47](https://github.com/matter-labs/zksync-era/commit/c3bde47c1e7d16bc00f9b089516ed3691e4f3eb1)) +* **zk_toolbox:** add multi-chain CI integration test ([#2594](https://github.com/matter-labs/zksync-era/issues/2594)) ([05c940e](https://github.com/matter-labs/zksync-era/commit/05c940efbd93023c315e5e13c98faee2153cc1cd)) + + +### Bug Fixes + +* **config:** Do not panic for observability config ([#2639](https://github.com/matter-labs/zksync-era/issues/2639)) 
([1e768d4](https://github.com/matter-labs/zksync-era/commit/1e768d402012f6c7ce83fdd46c55f830ec31416a)) +* **core:** Batched event processing support for Reth ([#2623](https://github.com/matter-labs/zksync-era/issues/2623)) ([958dfdc](https://github.com/matter-labs/zksync-era/commit/958dfdcac358897bfd4d2a2ddc1633a23dbfcdc9)) +* return correct witness inputs ([#2770](https://github.com/matter-labs/zksync-era/issues/2770)) ([2516e2e](https://github.com/matter-labs/zksync-era/commit/2516e2e5c83673687d61d143daa70e98ccecce53)) +* **tee-prover:** increase retries to reduce spurious alerts ([#2776](https://github.com/matter-labs/zksync-era/issues/2776)) ([4fdc806](https://github.com/matter-labs/zksync-era/commit/4fdc80636437090f6ebcfa4e2f1eb50edf53631a)) +* **tee-prover:** mitigate panic on redeployments ([#2764](https://github.com/matter-labs/zksync-era/issues/2764)) ([178b386](https://github.com/matter-labs/zksync-era/commit/178b38644f507c5f6d12ba862d0c699e87985dd7)) +* **tee:** lowercase enum TEE types ([#2798](https://github.com/matter-labs/zksync-era/issues/2798)) ([0f2f9bd](https://github.com/matter-labs/zksync-era/commit/0f2f9bd9ef4c2c7ba98a1fdbfca15d1de2b29997)) +* **vm-runner:** Fix statement timeouts in VM playground ([#2772](https://github.com/matter-labs/zksync-era/issues/2772)) ([d3cd553](https://github.com/matter-labs/zksync-era/commit/d3cd553888a5c903c6eae13a88e92c11602e93de)) + + +### Performance Improvements + +* **vm:** Fix VM performance regression on CI loadtest ([#2782](https://github.com/matter-labs/zksync-era/issues/2782)) ([bc0d7d5](https://github.com/matter-labs/zksync-era/commit/bc0d7d5935c8f5409a8e53f1c04c5141409aef31)) + +## [24.23.0](https://github.com/matter-labs/zksync-era/compare/core-v24.22.0...core-v24.23.0) (2024-08-28) + + +### Features + +* Refactor metrics/make API use binaries ([#2735](https://github.com/matter-labs/zksync-era/issues/2735)) ([8ed086a](https://github.com/matter-labs/zksync-era/commit/8ed086afecfcad30bfda44fc4d29a00beea71cca)) + + +### Bug Fixes + +* **api:** Fix duplicate DB connection acquired in `eth_call` ([#2763](https://github.com/matter-labs/zksync-era/issues/2763)) ([74b764c](https://github.com/matter-labs/zksync-era/commit/74b764c12e6daa410c611cec42455a00e68ed912)) +* **vm:** Fix used bytecodes divergence ([#2741](https://github.com/matter-labs/zksync-era/issues/2741)) ([923e33e](https://github.com/matter-labs/zksync-era/commit/923e33e81bba83f72b97ca9590c5cdf2da2a311b)) + +## [24.22.0](https://github.com/matter-labs/zksync-era/compare/core-v24.21.0...core-v24.22.0) (2024-08-27) + + +### Features + +* add flag to enable/disable DA inclusion verification ([#2647](https://github.com/matter-labs/zksync-era/issues/2647)) ([b425561](https://github.com/matter-labs/zksync-era/commit/b4255618708349c51f60f5c7fc26f9356d32b6ff)) +* **Base token:** add cbt metrics ([#2720](https://github.com/matter-labs/zksync-era/issues/2720)) ([58438eb](https://github.com/matter-labs/zksync-era/commit/58438eb174c30edf62e2ff8abb74567de2a4bea8)) +* Change default_protective_reads_persistence_enabled to false ([#2716](https://github.com/matter-labs/zksync-era/issues/2716)) ([8d0eee7](https://github.com/matter-labs/zksync-era/commit/8d0eee7ca8fe117b2ee286c6080bfa0057ee31ae)) +* **vm:** Extract oneshot VM executor interface ([#2671](https://github.com/matter-labs/zksync-era/issues/2671)) ([951d5f2](https://github.com/matter-labs/zksync-era/commit/951d5f208e5d16a5d95878dd345a8bd2a4144aa7)) +* **zk_toolbox:** Add holesky testnet as layer1 network 
([#2632](https://github.com/matter-labs/zksync-era/issues/2632)) ([d9266e5](https://github.com/matter-labs/zksync-era/commit/d9266e5ef3910732666c00c1324256fb5b54452d)) + + +### Bug Fixes + +* **api:** `tx.gas_price` field ([#2734](https://github.com/matter-labs/zksync-era/issues/2734)) ([aea3726](https://github.com/matter-labs/zksync-era/commit/aea3726c88b4e881bcd0f4a60ff32a730f200938)) +* **base_token_adjuster:** bug with a wrong metrics namespace ([#2744](https://github.com/matter-labs/zksync-era/issues/2744)) ([64b2ff8](https://github.com/matter-labs/zksync-era/commit/64b2ff8b81dcc146cd0535eb0d2d898c18ad5f7f)) +* **eth-sender:** missing Gateway migration changes ([#2732](https://github.com/matter-labs/zksync-era/issues/2732)) ([a4170e9](https://github.com/matter-labs/zksync-era/commit/a4170e9e7f321a1062495ec586e0ce9186269088)) +* **proof_data_handler:** TEE blob fetching error handling ([#2674](https://github.com/matter-labs/zksync-era/issues/2674)) ([c162510](https://github.com/matter-labs/zksync-era/commit/c162510598b45dc062c2c91085868f8aa966360e)) + ## [24.21.0](https://github.com/matter-labs/zksync-era/compare/core-v24.20.0...core-v24.21.0) (2024-08-22) diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index 36640049e446..a8162de13e9d 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -9,14 +9,14 @@ use zksync_contract_verifier_lib::ContractVerifier; use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_queued_job_processor::JobProcessor; -use zksync_utils::{wait_for_tasks::ManagedTasks, workspace_dir_or_current_dir}; +use zksync_utils::{env::Workspace, wait_for_tasks::ManagedTasks}; use zksync_vlog::prometheus::PrometheusExporterConfig; async fn update_compiler_versions(connection_pool: &ConnectionPool<Core>) { let mut storage = connection_pool.connection().await.unwrap(); let mut transaction = storage.start_transaction().await.unwrap(); - let zksync_home = workspace_dir_or_current_dir(); + let zksync_home = Workspace::locate().core(); let zksolc_path = zksync_home.join("etc/zksolc-bin/"); let zksolc_versions: Vec<String> = std::fs::read_dir(zksolc_path) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 33a460daba50..a1d3951ff3d8 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.21.0" # x-release-please-version +version = "24.24.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true @@ -59,7 +59,6 @@ envy.workspace = true url.workspace = true clap = { workspace = true, features = ["derive"] } serde_json.workspace = true -semver.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index cd4e845b8f3e..f8241deae26c 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -1217,6 +1217,7 @@ pub(crate) struct ExternalNodeConfig<R = RemoteENConfig> { pub observability: ObservabilityENConfig, pub experimental: ExperimentalENConfig, pub consensus: Option<ConsensusConfig>, + pub consensus_secrets: Option<ConsensusSecrets>, pub api_component: ApiComponentConfig, pub tree_component: TreeComponentConfig, pub remote: R, @@ -1240,6 +1241,8 @@ impl ExternalNodeConfig<()> { tree_component:
envy::prefixed("EN_TREE_") .from_env::() .context("could not load external node config (tree component params)")?, + consensus_secrets: read_consensus_secrets() + .context("config::read_consensus_secrets()")?, remote: (), }) } @@ -1262,7 +1265,7 @@ impl ExternalNodeConfig<()> { .map(read_yaml_repr::) .transpose() .context("failed decoding consensus YAML config")?; - + let consensus_secrets = secrets_config.consensus.clone(); let required = RequiredENConfig::from_configs( &general_config, &external_node_config, @@ -1298,6 +1301,7 @@ impl ExternalNodeConfig<()> { consensus, api_component, tree_component, + consensus_secrets, remote: (), }) } @@ -1332,6 +1336,7 @@ impl ExternalNodeConfig<()> { consensus: self.consensus, tree_component: self.tree_component, api_component: self.api_component, + consensus_secrets: self.consensus_secrets, remote, }) } @@ -1348,6 +1353,7 @@ impl ExternalNodeConfig { observability: ObservabilityENConfig::default(), experimental: ExperimentalENConfig::mock(), consensus: None, + consensus_secrets: None, api_component: ApiComponentConfig { tree_api_remote_url: None, }, diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index c30cc1a432bb..98e286c253a2 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -56,11 +56,7 @@ use zksync_node_framework::{ }; use zksync_state::RocksdbStorageOptions; -use crate::{ - config::{self, ExternalNodeConfig}, - metrics::framework::ExternalNodeMetricsLayer, - Component, -}; +use crate::{config::ExternalNodeConfig, metrics::framework::ExternalNodeMetricsLayer, Component}; /// Builder for the external node. #[derive(Debug)] @@ -240,9 +236,14 @@ impl ExternalNodeBuilder { fn add_consensus_layer(mut self) -> anyhow::Result { let config = self.config.consensus.clone(); - let secrets = - config::read_consensus_secrets().context("config::read_consensus_secrets()")?; - let layer = ExternalNodeConsensusLayer { config, secrets }; + let secrets = self.config.consensus_secrets.clone(); + let layer = ExternalNodeConsensusLayer { + build_version: crate::metadata::SERVER_VERSION + .parse() + .context("CRATE_VERSION.parse()")?, + config, + secrets, + }; self.node.add_layer(layer); Ok(self) } diff --git a/core/bin/selector_generator/Cargo.toml b/core/bin/selector_generator/Cargo.toml new file mode 100644 index 000000000000..e0b0afe0ae2c --- /dev/null +++ b/core/bin/selector_generator/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "selector_generator" +version = "0.1.0" +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true +publish = false + +[dependencies] +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +sha3.workspace = true +glob.workspace = true +clap = { workspace = true, features = ["derive"] } \ No newline at end of file diff --git a/core/bin/selector_generator/README.md b/core/bin/selector_generator/README.md new file mode 100644 index 000000000000..a954613c7e45 --- /dev/null +++ b/core/bin/selector_generator/README.md @@ -0,0 +1,13 @@ +# Generates the list of solidity selectors + +This tool generates a mapping from solidity selectors to function names. + +The output json file can be used by multiple tools to improve debugging and readability. + +By default, it appends the newly found selectors into the list. 
+ +To run, first make sure that you have your contracts compiled and then run: + +``` +cargo run ../../../contracts ../../../etc/selector-generator-data/selectors.json +``` diff --git a/core/bin/selector_generator/src/main.rs b/core/bin/selector_generator/src/main.rs new file mode 100644 index 000000000000..ad6180413f14 --- /dev/null +++ b/core/bin/selector_generator/src/main.rs @@ -0,0 +1,105 @@ +use std::{ + collections::HashMap, + fs::{File, OpenOptions}, + io::{self}, +}; + +use clap::Parser; +use glob::glob; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Keccak256}; + +#[derive(Debug, Serialize, Deserialize)] +struct ABIEntry { + #[serde(rename = "type")] + entry_type: String, + name: Option<String>, + inputs: Option<Vec<ABIInput>>, +} + +#[derive(Debug, Serialize, Deserialize)] +struct ABIInput { + #[serde(rename = "type")] + input_type: String, +} + +#[derive(Debug, Parser)] +#[command(author, version, about, long_about = None)] +struct Cli { + contracts_dir: String, + output_file: String, +} + +/// Computes the Solidity selector for a given method and arguments. +fn compute_selector(name: &str, inputs: &[ABIInput]) -> String { + let signature = format!( + "{}({})", + name, + inputs + .iter() + .map(|i| i.input_type.clone()) + .collect::<Vec<_>>() + .join(",") + ); + let mut hasher = Keccak256::new(); + hasher.update(signature); + format!("{:x}", hasher.finalize())[..8].to_string() +} + +/// Analyses all the JSON files, looking for 'abi' entries, and then computes the selectors for them. +fn process_files(directory: &str, output_file: &str) -> io::Result<()> { + let mut selectors: HashMap<String, String> = match File::open(output_file) { + Ok(file) => serde_json::from_reader(file).unwrap_or_default(), + Err(_) => HashMap::new(), + }; + let selectors_before = selectors.len(); + let mut analyzed_files = 0; + + for entry in glob(&format!("{}/**/*.json", directory)).expect("Failed to read glob pattern") { + match entry { + Ok(path) => { + let file_path = path.clone(); + let file = File::open(path)?; + let json: Result<serde_json::Value, _> = serde_json::from_reader(file); + + if let Ok(json) = json { + if let Some(abi) = json.get("abi").and_then(|v| v.as_array()) { + analyzed_files += 1; + for item in abi { + let entry: ABIEntry = serde_json::from_value(item.clone()).unwrap(); + if entry.entry_type == "function" { + if let (Some(name), Some(inputs)) = (entry.name, entry.inputs) { + let selector = compute_selector(&name, &inputs); + selectors.entry(selector).or_insert(name); + } + } + } + } + } else { + eprintln!("Error parsing file: {:?} - ignoring.", file_path) + } + } + Err(e) => eprintln!("Error reading file: {:?}", e), + } + } + println!( + "Analyzed {} files.
Added {} selectors (before: {} after: {})", + analyzed_files, + selectors.len() - selectors_before, + selectors_before, + selectors.len() + ); + + let file = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(output_file)?; + serde_json::to_writer_pretty(file, &selectors)?; + Ok(()) +} + +fn main() -> io::Result<()> { + let args = Cli::parse(); + process_files(&args.contracts_dir, &args.output_file) +} diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index 7ada47302248..cc2e031106b8 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -17,7 +17,7 @@ use zksync_types::{ IntrinsicSystemGasConstants, ProtocolVersionId, GUARANTEED_PUBDATA_IN_TX, L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, }; -use zksync_utils::workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; // For configs we will use the default value of `800_000` to represent the rough amount of L1 gas // needed to cover the batch expenses. @@ -210,7 +210,7 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst } fn save_file(path_in_repo: &str, content: String) { - let zksync_home = workspace_dir_or_current_dir(); + let zksync_home = Workspace::locate().core(); let fee_constants_path = zksync_home.join(path_in_repo); fs::write(fee_constants_path, content) diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 96de0537d538..3775b3c0e243 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -10,7 +10,7 @@ use zksync_multivm::{ storage::{InMemoryStorage, StorageView, WriteStorage}, tracer::VmExecutionStopReason, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, - VmInterface, + VmInterface, VmInterfaceExt, }, tracers::dynamic::vm_1_5_0::DynTracer, vm_latest::{ diff --git a/core/bin/verified_sources_fetcher/README.md b/core/bin/verified_sources_fetcher/README.md new file mode 100644 index 000000000000..0abddb7a8843 --- /dev/null +++ b/core/bin/verified_sources_fetcher/README.md @@ -0,0 +1,4 @@ +# Verified sources fetcher + +This tool downloads verified contract sources from the `contract_verification_requests` table in the SQL database. Then it +saves the sources and compilation settings to files.
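To illustrate the read path such a fetcher needs, here is a hedged `sqlx` sketch of querying that table (`sqlx` is already a workspace dependency); only the table name comes from the README above, while the `id` and `source_code` columns and the row type are assumptions made up for this example:

```rust
use sqlx::PgPool;

// Hypothetical row shape: only the table name `contract_verification_requests`
// is documented; the `id` and `source_code` columns are illustrative
// assumptions, not the actual schema.
#[derive(sqlx::FromRow)]
struct VerificationRequest {
    id: i64,
    source_code: String,
}

// Fetches all verification requests; assumes sqlx with the `postgres`
// and `macros` features enabled.
async fn fetch_requests(pool: &PgPool) -> sqlx::Result<Vec<VerificationRequest>> {
    sqlx::query_as::<_, VerificationRequest>(
        "SELECT id, source_code FROM contract_verification_requests",
    )
    .fetch_all(pool)
    .await
}
```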
diff --git a/core/bin/zksync_server/Cargo.toml b/core/bin/zksync_server/Cargo.toml index 72eff1384e2d..031183924064 100644 --- a/core/bin/zksync_server/Cargo.toml +++ b/core/bin/zksync_server/Cargo.toml @@ -21,7 +21,7 @@ zksync_utils.workspace = true zksync_types.workspace = true zksync_core_leftovers.workspace = true zksync_node_genesis.workspace = true -zksync_default_da_clients.workspace = true +zksync_da_clients.workspace = true # Consensus dependenices zksync_consensus_crypto.workspace = true diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 7e0ff0e49201..84898d6da067 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -17,9 +17,9 @@ use zksync_config::{ L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, Secrets, }, - ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, - EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, - ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DAClientConfig, DADispatcherConfig, + DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, + GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_core_leftovers::{ temp_config_store::{decode_yaml_repr, TempConfigStore}, @@ -199,6 +199,7 @@ fn load_env_config() -> anyhow::Result<TempConfigStore> { gas_adjuster_config: GasAdjusterConfig::from_env().ok(), observability: ObservabilityConfig::from_env().ok(), snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), + da_client_config: DAClientConfig::from_env().ok(), da_dispatcher_config: DADispatcherConfig::from_env().ok(), protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), basic_witness_input_producer_config: BasicWitnessInputProducerConfig::from_env().ok(), diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 6b0315200651..069a7a799ab5 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -3,14 +3,13 @@ use anyhow::Context; use zksync_config::{ - configs::{eth_sender::PubdataSendingMode, wallets::Wallets, GeneralConfig, Secrets}, + configs::{ + da_client::DAClient, eth_sender::PubdataSendingMode, wallets::Wallets, GeneralConfig, + Secrets, + }, ContractsConfig, GenesisConfig, }; use zksync_core_leftovers::Component; -use zksync_default_da_clients::{ - no_da::wiring_layer::NoDAClientWiringLayer, - object_store::{config::DAObjectStoreConfig, wiring_layer::ObjectStorageClientWiringLayer}, -}; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ tx_sender::{ApiContracts, TxSenderConfig}, @@ -28,6 +27,10 @@ use zksync_node_framework::{ commitment_generator::CommitmentGeneratorLayer, consensus::MainNodeConsensusLayer, contract_verification_api::ContractVerificationApiLayer, + da_clients::{ + avail::AvailWiringLayer, no_da::NoDAClientWiringLayer, + object_store::ObjectStorageClientWiringLayer, + }, da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, eth_watch::EthWatchLayer, @@ -122,7 +125,6 @@ impl MainNodeBuilder { let pools_layer = PoolsLayerBuilder::empty(config, secrets) .with_master(true) .with_replica(true) - .with_prover(true) // Used by house keeper.
.build(); self.node.add_layer(pools_layer); Ok(self) @@ -365,6 +367,7 @@ impl MainNodeBuilder { subscriptions_limit: Some(rpc_config.subscriptions_limit()), batch_request_size_limit: Some(rpc_config.max_batch_request_size()), response_body_size_limit: Some(rpc_config.max_response_body_size()), + with_extended_tracing: rpc_config.extended_api_tracing, ..Default::default() }; self.node.add_layer(Web3ServerLayer::http( @@ -446,18 +449,9 @@ impl MainNodeBuilder { fn add_house_keeper_layer(mut self) -> anyhow::Result { let house_keeper_config = try_load_config!(self.configs.house_keeper_config); - let fri_prover_config = try_load_config!(self.configs.prover_config); - let fri_witness_generator_config = try_load_config!(self.configs.witness_generator_config); - let fri_prover_group_config = try_load_config!(self.configs.prover_group_config); - let fri_proof_compressor_config = try_load_config!(self.configs.proof_compressor_config); - - self.node.add_layer(HouseKeeperLayer::new( - house_keeper_config, - fri_prover_config, - fri_witness_generator_config, - fri_prover_group_config, - fri_proof_compressor_config, - )); + + self.node + .add_layer(HouseKeeperLayer::new(house_keeper_config)); Ok(self) } @@ -509,16 +503,23 @@ impl MainNodeBuilder { Ok(self) } - fn add_no_da_client_layer(mut self) -> anyhow::Result { - self.node.add_layer(NoDAClientWiringLayer); - Ok(self) - } + fn add_da_client_layer(mut self) -> anyhow::Result { + let Some(da_client_config) = self.configs.da_client_config.clone() else { + tracing::warn!("No config for DA client, using the NoDA client"); + self.node.add_layer(NoDAClientWiringLayer); + return Ok(self); + }; + + match da_client_config.client { + DAClient::Avail(config) => { + self.node.add_layer(AvailWiringLayer::new(config)); + } + DAClient::ObjectStore(config) => { + self.node + .add_layer(ObjectStorageClientWiringLayer::new(config)); + } + } - #[allow(dead_code)] - fn add_object_storage_da_client_layer(mut self) -> anyhow::Result { - let object_store_config = DAObjectStoreConfig::from_env()?; - self.node - .add_layer(ObjectStorageClientWiringLayer::new(object_store_config.0)); Ok(self) } @@ -759,7 +760,7 @@ impl MainNodeBuilder { self = self.add_commitment_generator_layer()?; } Component::DADispatcher => { - self = self.add_no_da_client_layer()?.add_da_dispatcher_layer()?; + self = self.add_da_client_layer()?.add_da_dispatcher_layer()?; } Component::VmRunnerProtectiveReads => { self = self.add_vm_runner_protective_reads_layer()?; diff --git a/core/bin/zksync_tee_prover/Cargo.toml b/core/bin/zksync_tee_prover/Cargo.toml index 0c89971fd305..85908eebeaaa 100644 --- a/core/bin/zksync_tee_prover/Cargo.toml +++ b/core/bin/zksync_tee_prover/Cargo.toml @@ -14,8 +14,9 @@ publish = false [dependencies] anyhow.workspace = true async-trait.workspace = true +envy.workspace = true reqwest.workspace = true -secp256k1.workspace = true +secp256k1 = { workspace = true, features = ["serde"] } serde = { workspace = true, features = ["derive"] } thiserror.workspace = true tokio = { workspace = true, features = ["full"] } diff --git a/core/bin/zksync_tee_prover/src/config.rs b/core/bin/zksync_tee_prover/src/config.rs index 2a77c3752180..1c2eb229d616 100644 --- a/core/bin/zksync_tee_prover/src/config.rs +++ b/core/bin/zksync_tee_prover/src/config.rs @@ -1,12 +1,13 @@ -use std::path::PathBuf; +use std::{path::PathBuf, time::Duration}; use secp256k1::SecretKey; +use serde::Deserialize; use url::Url; use zksync_env_config::FromEnv; use zksync_types::tee_types::TeeType; /// Configuration for the 
TEE prover. -#[derive(Debug)] +#[derive(Debug, Clone, Deserialize)] pub(crate) struct TeeProverConfig { /// The private key used to sign the proofs. pub signing_key: SecretKey, @@ -16,6 +17,26 @@ pub(crate) struct TeeProverConfig { pub tee_type: TeeType, /// TEE proof data handler API. pub api_url: Url, + /// Number of retries for retriable errors before giving up on recovery (i.e., returning an error + /// from [`Self::run()`]). + pub max_retries: usize, + /// Initial back-off interval when retrying recovery on a retriable error. Each subsequent retry interval + /// will be multiplied by [`Self.retry_backoff_multiplier`]. + pub initial_retry_backoff_sec: u64, + /// Multiplier for the back-off interval when retrying recovery on a retriable error. + pub retry_backoff_multiplier: f32, + /// Maximum back-off interval when retrying recovery on a retriable error. + pub max_backoff_sec: u64, +} + +impl TeeProverConfig { + pub fn initial_retry_backoff(&self) -> Duration { + Duration::from_secs(self.initial_retry_backoff_sec) + } + + pub fn max_backoff(&self) -> Duration { + Duration::from_secs(self.max_backoff_sec) + } } impl FromEnv for TeeProverConfig { @@ -23,17 +44,17 @@ impl FromEnv for TeeProverConfig { /// /// Example usage of environment variables for tests: /// ``` - /// export TEE_SIGNING_KEY="b50b38c8d396c88728fc032ece558ebda96907a0b1a9340289715eef7bf29deb" - /// export TEE_QUOTE_FILE="/tmp/test" # run `echo test > /tmp/test` beforehand - /// export TEE_TYPE="sgx" - /// export TEE_API_URL="http://127.0.0.1:3320" + /// export TEE_PROVER_SIGNING_KEY="b50b38c8d396c88728fc032ece558ebda96907a0b1a9340289715eef7bf29deb" + /// export TEE_PROVER_ATTESTATION_QUOTE_FILE_PATH="/tmp/test" # run `echo test > /tmp/test` beforehand + /// export TEE_PROVER_TEE_TYPE="sgx" + /// export TEE_PROVER_API_URL="http://127.0.0.1:3320" + /// export TEE_PROVER_MAX_RETRIES=10 + /// export TEE_PROVER_INITIAL_RETRY_BACKOFF_SEC=1 + /// export TEE_PROVER_RETRY_BACKOFF_MULTIPLIER=2.0 + /// export TEE_PROVER_MAX_BACKOFF_SEC=128 /// ``` fn from_env() -> anyhow::Result { - Ok(Self { - signing_key: std::env::var("TEE_SIGNING_KEY")?.parse()?, - attestation_quote_file_path: std::env::var("TEE_QUOTE_FILE")?.parse()?, - tee_type: std::env::var("TEE_TYPE")?.parse()?, - api_url: std::env::var("TEE_API_URL")?.parse()?, - }) + let config: Self = envy::prefixed("TEE_PROVER_").from_env()?; + Ok(config) } } diff --git a/core/bin/zksync_tee_prover/src/main.rs b/core/bin/zksync_tee_prover/src/main.rs index 41f3be2ea052..70c6f888185a 100644 --- a/core/bin/zksync_tee_prover/src/main.rs +++ b/core/bin/zksync_tee_prover/src/main.rs @@ -32,8 +32,6 @@ fn main() -> anyhow::Result<()> { ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?; let tee_prover_config = TeeProverConfig::from_env()?; - let attestation_quote_bytes = std::fs::read(tee_prover_config.attestation_quote_file_path)?; - let prometheus_config = PrometheusConfig::from_env()?; let mut builder = ZkStackServiceBuilder::new()?; @@ -45,12 +43,7 @@ fn main() -> anyhow::Result<()> { builder .add_layer(SigintHandlerLayer) - .add_layer(TeeProverLayer::new( - tee_prover_config.api_url, - tee_prover_config.signing_key, - attestation_quote_bytes, - tee_prover_config.tee_type, - )); + .add_layer(TeeProverLayer::new(tee_prover_config)); if let Some(gateway) = prometheus_config.gateway_endpoint() { let exporter_config = diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 7f874533b4b3..1511f0c88e3d 100644 --- 
a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -1,7 +1,6 @@ -use std::{fmt, time::Duration}; +use std::fmt; -use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1, SecretKey}; -use url::Url; +use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1}; use zksync_basic_types::H256; use zksync_node_framework::{ service::StopReceiver, @@ -11,32 +10,21 @@ use zksync_node_framework::{ }; use zksync_prover_interface::inputs::TeeVerifierInput; use zksync_tee_verifier::Verify; -use zksync_types::{tee_types::TeeType, L1BatchNumber}; +use zksync_types::L1BatchNumber; -use crate::{api_client::TeeApiClient, error::TeeProverError, metrics::METRICS}; +use crate::{ + api_client::TeeApiClient, config::TeeProverConfig, error::TeeProverError, metrics::METRICS, +}; /// Wiring layer for `TeeProver` #[derive(Debug)] pub(crate) struct TeeProverLayer { - api_url: Url, - signing_key: SecretKey, - attestation_quote_bytes: Vec, - tee_type: TeeType, + config: TeeProverConfig, } impl TeeProverLayer { - pub fn new( - api_url: Url, - signing_key: SecretKey, - attestation_quote_bytes: Vec, - tee_type: TeeType, - ) -> Self { - Self { - api_url, - signing_key, - attestation_quote_bytes, - tee_type, - } + pub fn new(config: TeeProverConfig) -> Self { + Self { config } } } @@ -56,13 +44,10 @@ impl WiringLayer for TeeProverLayer { } async fn wire(self, _input: Self::Input) -> Result { + let api_url = self.config.api_url.clone(); let tee_prover = TeeProver { - config: Default::default(), - signing_key: self.signing_key, - public_key: self.signing_key.public_key(&Secp256k1::new()), - attestation_quote_bytes: self.attestation_quote_bytes, - tee_type: self.tee_type, - api_client: TeeApiClient::new(self.api_url), + config: self.config, + api_client: TeeApiClient::new(api_url), }; Ok(LayerOutput { tee_prover }) } @@ -70,10 +55,6 @@ impl WiringLayer for TeeProverLayer { pub(crate) struct TeeProver { config: TeeProverConfig, - signing_key: SecretKey, - public_key: PublicKey, - attestation_quote_bytes: Vec, - tee_type: TeeType, api_client: TeeApiClient, } @@ -81,9 +62,6 @@ impl fmt::Debug for TeeProver { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TeeProver") .field("config", &self.config) - .field("public_key", &self.public_key) - .field("attestation_quote_bytes", &self.attestation_quote_bytes) - .field("tee_type", &self.tee_type) .finish() } } @@ -101,7 +79,7 @@ impl TeeProver { let batch_number = verification_result.batch_number; let msg_to_sign = Message::from_slice(root_hash_bytes) .map_err(|e| TeeProverError::Verification(e.into()))?; - let signature = self.signing_key.sign_ecdsa(msg_to_sign); + let signature = self.config.signing_key.sign_ecdsa(msg_to_sign); observer.observe(); Ok((signature, batch_number, verification_result.value_hash)) } @@ -111,17 +89,17 @@ impl TeeProver { } } - async fn step(&self) -> Result, TeeProverError> { - match self.api_client.get_job(self.tee_type).await? { + async fn step(&self, public_key: &PublicKey) -> Result, TeeProverError> { + match self.api_client.get_job(self.config.tee_type).await? { Some(job) => { let (signature, batch_number, root_hash) = self.verify(*job)?; self.api_client .submit_proof( batch_number, signature, - &self.public_key, + public_key, root_hash, - self.tee_type, + self.config.tee_type, ) .await?; Ok(Some(batch_number)) @@ -134,30 +112,6 @@ impl TeeProver { } } -/// TEE prover configuration options. 
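The retry fields introduced in `TeeProverConfig` above drive the exponential back-off in `Task::run` further below; with the defaults being removed just after this point (1 s initial back-off, 2.0 multiplier, 128 s cap), the sleep schedule works out as in this small sketch:

```rust
use std::time::Duration;

fn main() {
    // Assumes the removed defaults: 1 s initial back-off, 2.0 multiplier, 128 s cap.
    let multiplier = 2.0_f32;
    let max_backoff = Duration::from_secs(128);
    let mut backoff = Duration::from_secs(1);
    for retry in 1..=9 {
        println!("retry {retry}: sleeping {backoff:?}");
        backoff = std::cmp::min(backoff.mul_f32(multiplier), max_backoff);
    }
    // Prints 1s, 2s, 4s, ..., 128s, 128s: growth is exponential until the cap kicks in.
}
```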
-#[derive(Debug, Clone)] -pub struct TeeProverConfig { - /// Number of retries for retriable errors before giving up on recovery (i.e., returning an error - /// from [`Self::run()`]). - pub max_retries: usize, - /// Initial back-off interval when retrying recovery on a retriable error. Each subsequent retry interval - /// will be multiplied by [`Self.retry_backoff_multiplier`]. - pub initial_retry_backoff: Duration, - pub retry_backoff_multiplier: f32, - pub max_backoff: Duration, -} - -impl Default for TeeProverConfig { - fn default() -> Self { - Self { - max_retries: 5, - initial_retry_backoff: Duration::from_secs(1), - retry_backoff_multiplier: 2.0, - max_backoff: Duration::from_secs(128), - } - } -} - #[async_trait::async_trait] impl Task for TeeProver { fn id(&self) -> TaskId { @@ -167,12 +121,15 @@ impl Task for TeeProver { async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { tracing::info!("Starting the task {}", self.id()); + let config = &self.config; + let attestation_quote_bytes = std::fs::read(&config.attestation_quote_file_path)?; + let public_key = config.signing_key.public_key(&Secp256k1::new()); self.api_client - .register_attestation(self.attestation_quote_bytes.clone(), &self.public_key) + .register_attestation(attestation_quote_bytes, &public_key) .await?; let mut retries = 1; - let mut backoff = self.config.initial_retry_backoff; + let mut backoff = config.initial_retry_backoff(); let mut observer = METRICS.job_waiting_time.start(); loop { @@ -180,11 +137,11 @@ impl Task for TeeProver { tracing::info!("Stop signal received, shutting down TEE Prover component"); return Ok(()); } - let result = self.step().await; + let result = self.step(&public_key).await; let need_to_sleep = match result { Ok(batch_number) => { retries = 1; - backoff = self.config.initial_retry_backoff; + backoff = config.initial_retry_backoff(); if let Some(batch_number) = batch_number { observer.observe(); observer = METRICS.job_waiting_time.start(); @@ -198,14 +155,14 @@ impl Task for TeeProver { } Err(err) => { METRICS.network_errors_counter.inc_by(1); - if !err.is_retriable() || retries > self.config.max_retries { + if !err.is_retriable() || retries > config.max_retries { return Err(err.into()); } - tracing::warn!(%err, "Failed TEE prover step function {retries}/{}, retrying in {} milliseconds.", self.config.max_retries, backoff.as_millis()); + tracing::warn!(%err, "Failed TEE prover step function {retries}/{}, retrying in {} milliseconds.", config.max_retries, backoff.as_millis()); retries += 1; backoff = std::cmp::min( - backoff.mul_f32(self.config.retry_backoff_multiplier), - self.config.max_backoff, + backoff.mul_f32(config.retry_backoff_multiplier), + config.max_backoff(), ); true } diff --git a/core/lib/basic_types/src/basic_fri_types.rs b/core/lib/basic_types/src/basic_fri_types.rs index 5969cca6b8c0..9de9920e86f6 100644 --- a/core/lib/basic_types/src/basic_fri_types.rs +++ b/core/lib/basic_types/src/basic_fri_types.rs @@ -152,6 +152,29 @@ impl AggregationRound { AggregationRound::Scheduler => None, } } + + /// Returns all the circuit IDs that correspond to a particular + /// aggregation round. + /// + /// For example, in aggregation round 0, the circuit ids should be 1 to 15 + 255 (EIP4844). + /// In aggregation round 1, the circuit ids should be 3 to 18. + /// In aggregation round 2, the circuit ids should be 2. + /// In aggregation round 3, the circuit ids should be 255. + /// In aggregation round 4, the circuit ids should be 1. 
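The mapping enumerated above is implemented by `circuit_ids()` immediately below. As a quick sanity check of the expected counts, a test sketch (not part of the diff):

```rust
#[test]
fn circuit_ids_per_round() {
    use zksync_basic_types::basic_fri_types::AggregationRound;

    // Round 0: circuits 1..=15 plus 255 (EIP-4844) -> 16 tuples.
    assert_eq!(AggregationRound::BasicCircuits.circuit_ids().len(), 16);
    // Round 1: circuits 3..=18 -> 16 tuples.
    assert_eq!(AggregationRound::LeafAggregation.circuit_ids().len(), 16);
    // Rounds 2, 3 and 4 each map to exactly one circuit (2, 255 and 1 respectively).
    assert_eq!(AggregationRound::NodeAggregation.circuit_ids().len(), 1);
    assert_eq!(AggregationRound::RecursionTip.circuit_ids().len(), 1);
    assert_eq!(AggregationRound::Scheduler.circuit_ids().len(), 1);
}
```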
+ pub fn circuit_ids(self) -> Vec<CircuitIdRoundTuple> { + match self { + AggregationRound::BasicCircuits => (1..=15) + .chain(once(255)) + .map(|circuit_id| CircuitIdRoundTuple::new(circuit_id, self as u8)) + .collect(), + AggregationRound::LeafAggregation => (3..=18) + .map(|circuit_id| CircuitIdRoundTuple::new(circuit_id, self as u8)) + .collect(), + AggregationRound::NodeAggregation => vec![CircuitIdRoundTuple::new(2, self as u8)], + AggregationRound::RecursionTip => vec![CircuitIdRoundTuple::new(255, self as u8)], + AggregationRound::Scheduler => vec![CircuitIdRoundTuple::new(1, self as u8)], + } + } } impl std::fmt::Display for AggregationRound { @@ -265,33 +288,17 @@ impl CircuitProverStats impl Default for CircuitProverStats { fn default() -> Self { - let mut stats = HashMap::new(); - for circuit in (1..=15).chain(once(255)) { - stats.insert( - CircuitIdRoundTuple::new(circuit, 0), - JobCountStatistics::default(), - ); - } - for circuit in 3..=18 { - stats.insert( - CircuitIdRoundTuple::new(circuit, 1), - JobCountStatistics::default(), - ); - } - stats.insert( - CircuitIdRoundTuple::new(2, 2), - JobCountStatistics::default(), - ); - stats.insert( - CircuitIdRoundTuple::new(255, 3), - JobCountStatistics::default(), - ); - stats.insert( - CircuitIdRoundTuple::new(1, 4), - JobCountStatistics::default(), - ); + let circuits_prover_stats = AggregationRound::ALL_ROUNDS + .into_iter() + .flat_map(|round| { + let circuit_ids = round.circuit_ids(); + circuit_ids.into_iter().map(|circuit_id_round_tuple| { + (circuit_id_round_tuple, JobCountStatistics::default()) + }) + }) + .collect(); Self { - circuits_prover_stats: stats, + circuits_prover_stats, } } } diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 265c06987afd..640a92c00da0 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -238,7 +238,12 @@ impl Detokenize for VerifierParams { #[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Serialize, Deserialize)] pub struct L1VerifierConfig { - pub recursion_scheduler_level_vk_hash: H256, + // Rename is required to not introduce breaking changes in the API for existing clients. + #[serde( + alias = "recursion_scheduler_level_vk_hash", + rename(serialize = "recursion_scheduler_level_vk_hash") + )] + pub snark_wrapper_vk_hash: H256, } impl From<ProtocolVersionId> for VmVersion { @@ -394,4 +399,22 @@ mod tests { assert_eq!(version, unpacked); } + + #[test] + fn test_verifier_config_serde() { + let de = [ + r#"{"recursion_scheduler_level_vk_hash": "0x1111111111111111111111111111111111111111111111111111111111111111"}"#, + r#"{"snark_wrapper_vk_hash": "0x1111111111111111111111111111111111111111111111111111111111111111"}"#, + ]; + for de in de.iter() { + let _: L1VerifierConfig = serde_json::from_str(de) + .unwrap_or_else(|err| panic!("Failed deserialization.
String: {de}, error {err}")); + } + let ser = L1VerifierConfig { + snark_wrapper_vk_hash: H256::repeat_byte(0x11), + }; + let ser_str = serde_json::to_string(&ser).unwrap(); + let expected_str = r#"{"recursion_scheduler_level_vk_hash":"0x1111111111111111111111111111111111111111111111111111111111111111"}"#; + assert_eq!(ser_str, expected_str); + } } diff --git a/core/lib/basic_types/src/tee_types.rs b/core/lib/basic_types/src/tee_types.rs index c9be9b6e99d8..d49f2f183885 100644 --- a/core/lib/basic_types/src/tee_types.rs +++ b/core/lib/basic_types/src/tee_types.rs @@ -1,9 +1,49 @@ +use std::fmt; + use serde::{Deserialize, Serialize}; -use strum::{Display, EnumString}; -#[derive(Debug, Clone, Copy, PartialEq, EnumString, Display, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] #[non_exhaustive] pub enum TeeType { - #[strum(serialize = "sgx")] Sgx, } + +impl fmt::Display for TeeType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TeeType::Sgx => write!(f, "sgx"), + } + } +} + +#[cfg(test)] +mod tests { + use serde_json; + + use super::*; + + #[test] + fn test_deserialize_teetype() { + let json_str = "\"sgx\""; + let tee_type: TeeType = serde_json::from_str(json_str).unwrap(); + assert_eq!(tee_type, TeeType::Sgx); + + for json_str in &["\"Sgx\"", "\"SGX\""] { + let result: Result<TeeType, _> = serde_json::from_str(json_str); + assert!(result.is_err()); + } + } + + #[test] + fn test_serialize_teetype() { + let tee_type = TeeType::Sgx; + let json_str = serde_json::to_string(&tee_type).unwrap(); + assert_eq!(json_str, "\"sgx\""); + } + + #[test] + fn test_display_teetype() { + assert_eq!(TeeType::Sgx.to_string(), "sgx"); + } +} diff --git a/core/lib/circuit_breaker/Cargo.toml b/core/lib/circuit_breaker/Cargo.toml index 9bc00b475d4a..926002e561c0 100644 --- a/core/lib/circuit_breaker/Cargo.toml +++ b/core/lib/circuit_breaker/Cargo.toml @@ -19,6 +19,3 @@ tokio = { workspace = true, features = ["time"] } anyhow.workspace = true async-trait.workspace = true tracing.workspace = true - -[dev-dependencies] -assert_matches.workspace = true diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index b13948448cdd..d1ab5ce8438f 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -24,6 +24,9 @@ rand.workspace = true secrecy.workspace = true serde = { workspace = true, features = ["derive"] } +[dev-dependencies] +serde_json.workspace = true + [features] default = [] observability_ext = ["zksync_vlog", "tracing"] diff --git a/core/lib/config/src/configs/base_token_adjuster.rs b/core/lib/config/src/configs/base_token_adjuster.rs index c8a0fe6312e3..d49a3853ff18 100644 --- a/core/lib/config/src/configs/base_token_adjuster.rs +++ b/core/lib/config/src/configs/base_token_adjuster.rs @@ -35,6 +35,9 @@ const DEFAULT_PRICE_FETCHING_SLEEP_MS: u64 = 5_000; /// Default number of milliseconds to sleep between transaction sending attempts const DEFAULT_L1_TX_SENDING_SLEEP_MS: u64 = 30_000; +/// Default percentage by which the quote must change for an update to be propagated to L1 +const DEFAULT_L1_UPDATE_DEVIATION_PERCENTAGE: u32 = 10; +
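The new deviation constant above gates price pushes to L1 on relative movement. A hedged sketch of the check it implies; the helper and its name are illustrative, not code from this PR:

```rust
/// Illustrative helper (not from the diff): decides whether a freshly fetched
/// quote deviates enough from the last value pushed to L1 to be worth the gas.
fn should_update_l1(last_pushed: u64, new_quote: u64, deviation_percentage: u32) -> bool {
    let diff = last_pushed.abs_diff(new_quote);
    diff * 100 >= last_pushed * u64::from(deviation_percentage)
}

fn main() {
    // With the default of 10%, a move from 1_000 to 1_099 is skipped...
    assert!(!should_update_l1(1_000, 1_099, 10));
    // ...while 1_000 -> 1_100 (exactly 10%) is propagated.
    assert!(should_update_l1(1_000, 1_100, 10));
}
```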
/// Default maximum acceptable priority fee in gwei to prevent sending transaction with extremely high priority fee. const DEFAULT_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI: u64 = 100_000_000_000; @@ -79,6 +82,11 @@ pub struct BaseTokenAdjusterConfig { #[serde(default = "BaseTokenAdjusterConfig::default_l1_tx_sending_sleep_ms")] pub l1_tx_sending_sleep_ms: u64, + /// The percentage by which a quote must change for an update to be propagated to L1. + /// Exists to save on gas. + #[serde(default = "BaseTokenAdjusterConfig::default_l1_update_deviation_percentage")] + pub l1_update_deviation_percentage: u32, + /// Maximum number of attempts to fetch quote from a remote API before failing over #[serde(default = "BaseTokenAdjusterConfig::default_price_fetching_max_attempts")] pub price_fetching_max_attempts: u32, @@ -107,6 +115,7 @@ impl Default for BaseTokenAdjusterConfig { l1_receipt_checking_sleep_ms: Self::default_l1_receipt_checking_sleep_ms(), l1_tx_sending_max_attempts: Self::default_l1_tx_sending_max_attempts(), l1_tx_sending_sleep_ms: Self::default_l1_tx_sending_sleep_ms(), + l1_update_deviation_percentage: Self::default_l1_update_deviation_percentage(), price_fetching_sleep_ms: Self::default_price_fetching_sleep_ms(), price_fetching_max_attempts: Self::default_price_fetching_max_attempts(), halt_on_error: Self::default_halt_on_error(), @@ -170,6 +179,9 @@ impl BaseTokenAdjusterConfig { pub fn default_l1_tx_sending_sleep_ms() -> u64 { DEFAULT_L1_TX_SENDING_SLEEP_MS } + pub fn default_l1_update_deviation_percentage() -> u32 { + DEFAULT_L1_UPDATE_DEVIATION_PERCENTAGE + } pub fn default_price_fetching_sleep_ms() -> u64 { DEFAULT_PRICE_FETCHING_SLEEP_MS diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 50885a6ec6fe..759e13128338 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -1,7 +1,8 @@ use std::collections::{BTreeMap, BTreeSet}; -use secrecy::{ExposeSecret as _, Secret}; -use zksync_basic_types::L2ChainId; +use secrecy::ExposeSecret as _; +pub use secrecy::Secret; +use zksync_basic_types::{ethabi, L2ChainId}; use zksync_concurrency::{limiter, time}; /// `zksync_consensus_crypto::TextFmt` representation of `zksync_consensus_roles::validator::PublicKey`. @@ -89,6 +90,8 @@ pub struct GenesisSpec { /// Leader of the committee. Represents /// `zksync_consensus_roles::validator::LeaderSelectionMode::Sticky`. pub leader: ValidatorPublicKey, + /// Address of the registry contract.
+ pub registry_address: Option, } #[derive(Clone, Debug, PartialEq, Default)] diff --git a/core/lib/config/src/configs/da_client/avail.rs b/core/lib/config/src/configs/da_client/avail.rs new file mode 100644 index 000000000000..e8d119787912 --- /dev/null +++ b/core/lib/config/src/configs/da_client/avail.rs @@ -0,0 +1,11 @@ +use serde::Deserialize; + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailConfig { + pub api_node_url: String, + pub bridge_api_url: String, + pub seed: String, + pub app_id: u32, + pub timeout: usize, + pub max_retries: usize, +} diff --git a/core/lib/config/src/configs/da_client/mod.rs b/core/lib/config/src/configs/da_client/mod.rs new file mode 100644 index 000000000000..38337438c10e --- /dev/null +++ b/core/lib/config/src/configs/da_client/mod.rs @@ -0,0 +1,20 @@ +use serde::Deserialize; + +use crate::{AvailConfig, ObjectStoreConfig}; + +pub mod avail; + +pub const AVAIL_CLIENT_CONFIG_NAME: &str = "Avail"; +pub const OBJECT_STORE_CLIENT_CONFIG_NAME: &str = "ObjectStore"; + +#[derive(Debug, Clone, PartialEq)] +pub struct DAClientConfig { + pub client: DAClient, +} + +#[derive(Debug, Clone, PartialEq, Deserialize)] +#[serde(tag = "client")] +pub enum DAClient { + Avail(AvailConfig), + ObjectStore(ObjectStoreConfig), +} diff --git a/core/lib/config/src/configs/da_dispatcher.rs b/core/lib/config/src/configs/da_dispatcher.rs index 303a2c0b54c1..e9ad6bd3c074 100644 --- a/core/lib/config/src/configs/da_dispatcher.rs +++ b/core/lib/config/src/configs/da_dispatcher.rs @@ -5,6 +5,7 @@ use serde::Deserialize; pub const DEFAULT_POLLING_INTERVAL_MS: u32 = 5000; pub const DEFAULT_MAX_ROWS_TO_DISPATCH: u32 = 100; pub const DEFAULT_MAX_RETRIES: u16 = 5; +pub const DEFAULT_USE_DUMMY_INCLUSION_DATA: bool = false; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct DADispatcherConfig { @@ -14,6 +15,10 @@ pub struct DADispatcherConfig { pub max_rows_to_dispatch: Option, /// The maximum number of retries for the dispatch of a blob. pub max_retries: Option, + /// Use dummy value as inclusion proof instead of getting it from the client. + // TODO: run a verification task to check if the L1 contract expects the inclusion proofs to + // avoid the scenario where contracts expect real proofs, and server is using dummy proofs. + pub use_dummy_inclusion_data: Option, } impl DADispatcherConfig { @@ -22,6 +27,7 @@ impl DADispatcherConfig { polling_interval_ms: Some(DEFAULT_POLLING_INTERVAL_MS), max_rows_to_dispatch: Some(DEFAULT_MAX_ROWS_TO_DISPATCH), max_retries: Some(DEFAULT_MAX_RETRIES), + use_dummy_inclusion_data: Some(DEFAULT_USE_DUMMY_INCLUSION_DATA), } } @@ -40,4 +46,9 @@ impl DADispatcherConfig { pub fn max_retries(&self) -> u16 { self.max_retries.unwrap_or(DEFAULT_MAX_RETRIES) } + + pub fn use_dummy_inclusion_data(&self) -> bool { + self.use_dummy_inclusion_data + .unwrap_or(DEFAULT_USE_DUMMY_INCLUSION_DATA) + } } diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index 097f3c4112b3..618cfd3d388c 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -65,8 +65,7 @@ pub struct ExperimentalVmPlaygroundConfig { #[serde(default)] pub fast_vm_mode: FastVmMode, /// Path to the RocksDB cache directory. - #[serde(default = "ExperimentalVmPlaygroundConfig::default_db_path")] - pub db_path: String, + pub db_path: Option, /// First L1 batch to consider processed. Will not be used if the processing cursor is persisted, unless the `reset` flag is set. 
#[serde(default)] pub first_processed_batch: L1BatchNumber, @@ -83,7 +82,7 @@ impl Default for ExperimentalVmPlaygroundConfig { fn default() -> Self { Self { fast_vm_mode: FastVmMode::default(), - db_path: Self::default_db_path(), + db_path: None, first_processed_batch: L1BatchNumber(0), window_size: Self::default_window_size(), reset: false, @@ -92,10 +91,6 @@ impl Default for ExperimentalVmPlaygroundConfig { } impl ExperimentalVmPlaygroundConfig { - pub fn default_db_path() -> String { - "./db/vm_playground".to_owned() - } - pub fn default_window_size() -> NonZeroU32 { NonZeroU32::new(1).unwrap() } diff --git a/core/lib/config/src/configs/fri_prover.rs b/core/lib/config/src/configs/fri_prover.rs index f6a21beaa6dc..32558dd2244b 100644 --- a/core/lib/config/src/configs/fri_prover.rs +++ b/core/lib/config/src/configs/fri_prover.rs @@ -4,7 +4,7 @@ use serde::Deserialize; use crate::ObjectStoreConfig; -#[derive(Debug, Deserialize, Clone, PartialEq)] +#[derive(Debug, Deserialize, Clone, Copy, PartialEq)] pub enum SetupLoadMode { FromDisk, FromMemory, diff --git a/core/lib/config/src/configs/fri_prover_group.rs b/core/lib/config/src/configs/fri_prover_group.rs index 0fd752b5c286..294d4d1bbd44 100644 --- a/core/lib/config/src/configs/fri_prover_group.rs +++ b/core/lib/config/src/configs/fri_prover_group.rs @@ -1,7 +1,7 @@ use std::collections::HashSet; use serde::Deserialize; -use zksync_basic_types::basic_fri_types::CircuitIdRoundTuple; +use zksync_basic_types::basic_fri_types::{AggregationRound, CircuitIdRoundTuple}; /// Configuration for the grouping of specialized provers. #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -81,6 +81,7 @@ impl FriProverGroupConfig { .flatten() .collect() } + /// check all_circuit ids present exactly once /// and For each aggregation round, check that the circuit ids are in the correct range. /// For example, in aggregation round 0, the circuit ids should be 1 to 15 + 255 (EIP4844). @@ -89,7 +90,6 @@ impl FriProverGroupConfig { /// In aggregation round 3, the circuit ids should be 255. /// In aggregation round 4, the circuit ids should be 1. 
pub fn validate(&self) -> anyhow::Result<()> { - let mut rounds: Vec> = vec![Vec::new(); 5]; let groups = [ &self.group_0, &self.group_1, @@ -107,110 +107,45 @@ impl FriProverGroupConfig { &self.group_13, &self.group_14, ]; - for group in groups { - for circuit_round in group { - let round = match rounds.get_mut(circuit_round.aggregation_round as usize) { - Some(round) => round, - None => anyhow::bail!( - "Invalid aggregation round {}.", - circuit_round.aggregation_round - ), - }; - round.push(circuit_round.clone()); - } - } - - for (round, round_data) in rounds.iter().enumerate() { - let circuit_ids: Vec = round_data.iter().map(|x| x.circuit_id).collect(); - let unique_circuit_ids: HashSet = circuit_ids.iter().copied().collect(); - let duplicates: HashSet = circuit_ids - .iter() - .filter(|id| circuit_ids.iter().filter(|x| x == id).count() > 1) - .copied() - .collect(); + let mut expected_circuit_ids: HashSet<_> = AggregationRound::ALL_ROUNDS + .into_iter() + .flat_map(|r| r.circuit_ids()) + .collect(); - let (missing_ids, not_in_range, expected_circuits_description) = match round { - 0 => { - let mut expected_range: Vec<_> = (1..=15).collect(); - expected_range.push(255); - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit IDs 1 to 15 and 255") - } - 1 => { - let expected_range: Vec<_> = (3..=18).collect(); - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit IDs 3 to 18") - } - 2 => { - let expected_range: Vec<_> = vec![2]; - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit ID 2") + let mut provided_circuit_ids = HashSet::new(); + for (group_id, group) in groups.iter().enumerate() { + for circuit_id_round in group.iter() { + // Make sure that it's a known circuit. + if !expected_circuit_ids.contains(circuit_id_round) { + anyhow::bail!( + "Group {} contains unexpected circuit id: {:?}", + group_id, + circuit_id_round + ); } - 3 => { - let expected_range: Vec<_> = vec![255]; - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit ID 255") - } - 4 => { - let expected_range: Vec<_> = vec![1]; - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit ID 1") - } - _ => { - anyhow::bail!("Unknown round {}", round); + // Remove this circuit from the expected set: later we will check that all circuits + // are present. + expected_circuit_ids.remove(circuit_id_round); + + // Make sure that the circuit is not duplicated. 
+ if provided_circuit_ids.contains(circuit_id_round) { + anyhow::bail!( + "Group {} contains duplicate circuit id: {:?}", + group_id, + circuit_id_round + ); } - }; - if !missing_ids.is_empty() { - anyhow::bail!("Circuit IDs for round {round} are missing: {missing_ids:?}"); - } - if circuit_ids.len() != unique_circuit_ids.len() { - anyhow::bail!("Circuit IDs: {duplicates:?} should be unique for round {round}.",); - } - if !not_in_range.is_empty() { - anyhow::bail!("Aggregation round {round} should only contain {expected_circuits_description}. Ids out of range: {not_in_range:?}"); + provided_circuit_ids.insert(circuit_id_round.clone()); } } + // All the circuit IDs should have been removed from the expected set. + if !expected_circuit_ids.is_empty() { + anyhow::bail!( + "Some circuit ids are missing from the groups: {:?}", + expected_circuit_ids + ); + } + Ok(()) } } diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index 38ffd3d45fac..bb733510f77d 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -3,6 +3,7 @@ use crate::{ base_token_adjuster::BaseTokenAdjusterConfig, chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, consensus::ConsensusConfig, + da_client::DAClientConfig, da_dispatcher::DADispatcherConfig, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, @@ -41,6 +42,7 @@ pub struct GeneralConfig { pub eth: Option, pub snapshot_creator: Option, pub observability: Option, + pub da_client_config: Option, pub da_dispatcher_config: Option, pub protective_reads_writer_config: Option, pub basic_witness_input_producer_config: Option, diff --git a/core/lib/config/src/configs/genesis.rs b/core/lib/config/src/configs/genesis.rs index 2c5c91128431..6c4bacc3a6e2 100644 --- a/core/lib/config/src/configs/genesis.rs +++ b/core/lib/config/src/configs/genesis.rs @@ -20,7 +20,14 @@ pub struct GenesisConfig { pub l1_chain_id: L1ChainId, pub sl_chain_id: Option, pub l2_chain_id: L2ChainId, - pub recursion_scheduler_level_vk_hash: H256, + // Note: `serde` isn't used with protobuf config. The same alias is implemented in + // `zksync_protobuf_config` manually. + // Rename is required to not introduce breaking changes in the API for existing clients. + #[serde( + alias = "recursion_scheduler_level_vk_hash", + rename(serialize = "recursion_scheduler_level_vk_hash") + )] + pub snark_wrapper_vk_hash: H256, pub fee_account: Address, pub dummy_verifier: bool, pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, @@ -37,7 +44,7 @@ impl GenesisConfig { GenesisConfig { genesis_root_hash: Some(H256::repeat_byte(0x01)), rollup_last_leaf_index: Some(26), - recursion_scheduler_level_vk_hash: H256::repeat_byte(0x02), + snark_wrapper_vk_hash: H256::repeat_byte(0x02), fee_account: Default::default(), genesis_commitment: Some(H256::repeat_byte(0x17)), bootloader_hash: Default::default(), @@ -54,3 +61,37 @@ impl GenesisConfig { } } } + +#[cfg(test)] +mod tests { + use super::GenesisConfig; + + // This test checks that serde overrides (`rename`, `alias`) work for `snark_wrapper_vk_hash` field. + #[test] + fn genesis_serde_snark_wrapper_vk_hash() { + let genesis = GenesisConfig::for_tests(); + let genesis_str = serde_json::to_string(&genesis).unwrap(); + + // Check that we use backward-compatible name in serialization. + // If you want to remove this check, make sure that all the potential clients are updated. 
+ assert!( + genesis_str.contains("recursion_scheduler_level_vk_hash"), + "Serialization should use backward-compatible name" + ); + + let genesis2: GenesisConfig = serde_json::from_str(&genesis_str).unwrap(); + assert_eq!(genesis, genesis2); + + let genesis_json = r#"{ + "snark_wrapper_vk_hash": "0x1111111111111111111111111111111111111111111111111111111111111111", + "l1_chain_id": 1, + "l2_chain_id": 1, + "fee_account": "0x1111111111111111111111111111111111111111", + "dummy_verifier": false, + "l1_batch_commit_data_generator_mode": "Rollup" + }"#; + serde_json::from_str::(genesis_json).unwrap_or_else(|err| { + panic!("Failed to parse genesis config with a new name: {}", err) + }); + } +} diff --git a/core/lib/config/src/configs/house_keeper.rs b/core/lib/config/src/configs/house_keeper.rs index e1eb13375667..39e304562fa0 100644 --- a/core/lib/config/src/configs/house_keeper.rs +++ b/core/lib/config/src/configs/house_keeper.rs @@ -4,29 +4,4 @@ use serde::Deserialize; #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct HouseKeeperConfig { pub l1_batch_metrics_reporting_interval_ms: u64, - pub gpu_prover_queue_reporting_interval_ms: u64, - pub prover_job_retrying_interval_ms: u64, - pub prover_stats_reporting_interval_ms: u64, - pub witness_job_moving_interval_ms: u64, - pub witness_generator_stats_reporting_interval_ms: u64, - pub witness_generator_job_retrying_interval_ms: u64, - pub prover_db_pool_size: u32, - pub proof_compressor_job_retrying_interval_ms: u64, - pub proof_compressor_stats_reporting_interval_ms: u64, - pub prover_job_archiver_archiving_interval_ms: Option, - pub prover_job_archiver_archive_after_secs: Option, - pub fri_gpu_prover_archiver_archiving_interval_ms: Option, - pub fri_gpu_prover_archiver_archive_after_secs: Option, -} - -impl HouseKeeperConfig { - pub fn prover_job_archiver_params(&self) -> Option<(u64, u64)> { - self.prover_job_archiver_archiving_interval_ms - .zip(self.prover_job_archiver_archive_after_secs) - } - - pub fn fri_gpu_prover_archiver_params(&self) -> Option<(u64, u64)> { - self.fri_gpu_prover_archiver_archiving_interval_ms - .zip(self.fri_gpu_prover_archiver_archive_after_secs) - } } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index b213060f7ced..1ad503e0687f 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -5,6 +5,7 @@ pub use self::{ commitment_generator::CommitmentGeneratorConfig, contract_verifier::ContractVerifierConfig, contracts::{ContractsConfig, EcosystemContracts}, + da_client::{avail::AvailConfig, DAClientConfig}, da_dispatcher::DADispatcherConfig, database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, @@ -38,6 +39,7 @@ mod commitment_generator; pub mod consensus; pub mod contract_verifier; pub mod contracts; +pub mod da_client; pub mod da_dispatcher; pub mod database; pub mod en_config; diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index ae8288fa72ea..9191edc39822 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,9 +1,10 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] pub use crate::configs::{ - ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, ContractsConfig, - DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, - GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, AvailConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, 
ContractsConfig, + DAClientConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, + ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, + PostgresConfig, SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index e028c3d3aec0..4a2858b9cbfc 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -12,8 +12,12 @@ use zksync_basic_types::{ use zksync_consensus_utils::EncodeDist; use zksync_crypto_primitives::K256PrivateKey; -use crate::configs::{ - self, eth_sender::PubdataSendingMode, external_price_api_client::ForcedPriceClientConfig, +use crate::{ + configs::{ + self, da_client::DAClient::Avail, eth_sender::PubdataSendingMode, + external_price_api_client::ForcedPriceClientConfig, + }, + AvailConfig, }; trait Sample { @@ -243,17 +247,17 @@ impl Distribution for EncodeDist { default_upgrade_addr: rng.gen(), diamond_proxy_addr: rng.gen(), validator_timelock_addr: rng.gen(), - l1_erc20_bridge_proxy_addr: rng.gen(), - l2_erc20_bridge_addr: rng.gen(), - l1_shared_bridge_proxy_addr: rng.gen(), - l2_shared_bridge_addr: rng.gen(), - l1_weth_bridge_proxy_addr: rng.gen(), - l2_weth_bridge_addr: rng.gen(), - l2_testnet_paymaster_addr: rng.gen(), + l1_erc20_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_erc20_bridge_addr: self.sample_opt(|| rng.gen()), + l1_shared_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_shared_bridge_addr: self.sample_opt(|| rng.gen()), + l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), + l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), l1_multicall3_addr: rng.gen(), - base_token_addr: rng.gen(), - chain_admin_addr: rng.gen(), ecosystem_contracts: self.sample(rng), + base_token_addr: self.sample_opt(|| rng.gen()), + chain_admin_addr: self.sample_opt(|| rng.gen()), } } } @@ -630,19 +634,6 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::house_keeper::HouseKeeperConfig { configs::house_keeper::HouseKeeperConfig { l1_batch_metrics_reporting_interval_ms: self.sample(rng), - gpu_prover_queue_reporting_interval_ms: self.sample(rng), - prover_job_retrying_interval_ms: self.sample(rng), - prover_stats_reporting_interval_ms: self.sample(rng), - witness_job_moving_interval_ms: self.sample(rng), - witness_generator_stats_reporting_interval_ms: self.sample(rng), - prover_db_pool_size: self.sample(rng), - witness_generator_job_retrying_interval_ms: self.sample(rng), - proof_compressor_job_retrying_interval_ms: self.sample(rng), - proof_compressor_stats_reporting_interval_ms: self.sample(rng), - prover_job_archiver_archiving_interval_ms: self.sample(rng), - prover_job_archiver_archive_after_secs: self.sample(rng), - fri_gpu_prover_archiver_archiving_interval_ms: self.sample(rng), - fri_gpu_prover_archiver_archive_after_secs: self.sample(rng), } } } @@ -741,7 +732,7 @@ impl Distribution for EncodeDist { l1_chain_id: L1ChainId(self.sample(rng)), sl_chain_id: None, l2_chain_id: L2ChainId::default(), - recursion_scheduler_level_vk_hash: rng.gen(), + snark_wrapper_vk_hash: rng.gen(), dummy_verifier: rng.gen(), l1_batch_commit_data_generator_mode: match rng.gen_range(0..2) { 0 => L1BatchCommitmentMode::Rollup, @@ -790,6 +781,7 @@ impl Distribution for EncodeDist { validators: self.sample_collect(rng), attesters: self.sample_collect(rng), leader: ValidatorPublicKey(self.sample(rng)), + registry_address: self.sample_opt(|| rng.gen()), } } } 
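`DAClient` (added in `configs/da_client/mod.rs` above) is an internally tagged enum: the `client` field selects the variant, and the sibling fields fill the inner config. A self-contained sketch of that selection, with a trimmed-down `AvailConfig` for brevity:

```rust
use serde::Deserialize;

// Trimmed-down stand-ins for the real config types; illustration only.
#[derive(Debug, Deserialize)]
struct AvailConfig {
    api_node_url: String,
    app_id: u32,
}

#[derive(Debug, Deserialize)]
#[serde(tag = "client")]
enum DAClient {
    Avail(AvailConfig),
}

fn main() {
    // The `client` tag picks the variant; the remaining fields populate `AvailConfig`.
    let json = r#"{"client": "Avail", "api_node_url": "wss://avail.example", "app_id": 1}"#;
    let parsed: DAClient = serde_json::from_str(json).unwrap();
    println!("{parsed:?}");
}
```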
@@ -934,12 +926,28 @@ impl Distribution for EncodeDist { } } +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> configs::da_client::DAClientConfig { + configs::da_client::DAClientConfig { + client: Avail(AvailConfig { + api_node_url: self.sample(rng), + bridge_api_url: self.sample(rng), + seed: self.sample(rng), + app_id: self.sample(rng), + timeout: self.sample(rng), + max_retries: self.sample(rng), + }), + } + } +} + impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::da_dispatcher::DADispatcherConfig { configs::da_dispatcher::DADispatcherConfig { polling_interval_ms: self.sample(rng), max_rows_to_dispatch: self.sample(rng), max_retries: self.sample(rng), + use_dummy_inclusion_data: self.sample(rng), } } } @@ -1045,6 +1053,7 @@ impl Distribution for Enc l1_receipt_checking_sleep_ms: self.sample(rng), l1_tx_sending_max_attempts: self.sample(rng), l1_tx_sending_sleep_ms: self.sample(rng), + l1_update_deviation_percentage: self.sample(rng), price_fetching_max_attempts: self.sample(rng), price_fetching_sleep_ms: self.sample(rng), halt_on_error: self.sample(rng), @@ -1131,6 +1140,7 @@ impl Distribution for EncodeDist { eth: self.sample(rng), snapshot_creator: self.sample(rng), observability: self.sample(rng), + da_client_config: self.sample(rng), da_dispatcher_config: self.sample(rng), protective_reads_writer_config: self.sample(rng), basic_witness_input_producer_config: self.sample(rng), diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index 44bb05a89764..73b4a0ffaaa2 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -100,6 +100,11 @@ pub const SHA256_PRECOMPILE_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x02, ]); +pub const SECP256R1_VERIFY_PRECOMPILE_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x00, +]); + pub const EC_ADD_PRECOMPILE_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, diff --git a/core/lib/contract_verifier/Cargo.toml b/core/lib/contract_verifier/Cargo.toml index 2803e3bb4185..580982c9a700 100644 --- a/core/lib/contract_verifier/Cargo.toml +++ b/core/lib/contract_verifier/Cargo.toml @@ -13,7 +13,6 @@ categories.workspace = true [dependencies] zksync_types.workspace = true zksync_dal.workspace = true -zksync_env_config.workspace = true zksync_config.workspace = true zksync_contracts.workspace = true zksync_queued_job_processor.workspace = true diff --git a/core/lib/contract_verifier/src/lib.rs b/core/lib/contract_verifier/src/lib.rs index 82751d4c9754..c8d9b89d834c 100644 --- a/core/lib/contract_verifier/src/lib.rs +++ b/core/lib/contract_verifier/src/lib.rs @@ -1,6 +1,6 @@ use std::{ collections::HashMap, - path::Path, + path::{Path, PathBuf}, time::{Duration, Instant}, }; @@ -20,7 +20,7 @@ use zksync_types::{ }, Address, }; -use zksync_utils::workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; use crate::{ error::ContractVerifierError, @@ -38,8 +38,8 @@ lazy_static! 
{ static ref DEPLOYER_CONTRACT: Contract = zksync_contracts::deployer_contract(); } -fn home_path() -> &'static Path { - workspace_dir_or_current_dir() +fn home_path() -> PathBuf { + Workspace::locate().core() } #[derive(Debug)] diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a7ef0e5b26ca..f57649c9d695 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -16,7 +16,7 @@ use ethabi::{ }; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, workspace_dir_or_current_dir}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, env::Workspace}; pub mod test_contracts; @@ -48,6 +48,10 @@ const DIAMOND_INIT_CONTRACT_FILE: (&str, &str) = ( ); const GOVERNANCE_CONTRACT_FILE: (&str, &str) = ("governance", "IGovernance.sol/IGovernance.json"); const CHAIN_ADMIN_CONTRACT_FILE: (&str, &str) = ("governance", "IChainAdmin.sol/IChainAdmin.json"); +const GETTERS_FACET_CONTRACT_FILE: (&str, &str) = ( + "state-transition/chain-deps/facets", + "Getters.sol/GettersFacet.json", +); const MULTICALL3_CONTRACT_FILE: (&str, &str) = ("dev-contracts", "Multicall3.sol/Multicall3.json"); const VERIFIER_CONTRACT_FILE: (&str, &str) = ("state-transition", "Verifier.sol/Verifier.json"); @@ -60,8 +64,8 @@ const LOADNEXT_CONTRACT_FILE: &str = const LOADNEXT_SIMPLE_CONTRACT_FILE: &str = "etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/Foo.json"; -fn home_path() -> &'static Path { - workspace_dir_or_current_dir() +fn home_path() -> PathBuf { + Workspace::locate().core() } fn read_file_to_json_value(path: impl AsRef + std::fmt::Debug) -> serde_json::Value { @@ -134,6 +138,10 @@ pub fn chain_admin_contract() -> Contract { load_contract_for_both_compilers(CHAIN_ADMIN_CONTRACT_FILE) } +pub fn getters_facet_contract() -> Contract { + load_contract_for_both_compilers(GETTERS_FACET_CONTRACT_FILE) +} + pub fn state_transition_manager_contract() -> Contract { load_contract_for_both_compilers(STATE_TRANSITION_CONTRACT_FILE) } diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml index 589a077d4bf9..a68d715eb574 100644 --- a/core/lib/da_client/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -12,9 +12,5 @@ categories.workspace = true [dependencies] serde = { workspace = true, features = ["derive"] } -tracing.workspace = true async-trait.workspace = true anyhow.workspace = true - -zksync_config.workspace = true -zksync_types.workspace = true diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs index 2b15cbe905ed..e7e4453d727e 100644 --- a/core/lib/da_client/src/types.rs +++ b/core/lib/da_client/src/types.rs @@ -35,6 +35,12 @@ pub struct DispatchResponse { pub blob_id: String, } +impl From for DispatchResponse { + fn from(blob_id: String) -> Self { + DispatchResponse { blob_id } + } +} + /// `InclusionData` is the data needed to verify on L1 that a blob is included in the DA layer. 
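With the `From<String>` impl added to `DispatchResponse` above, a DA client can finish its dispatch path with a plain `.into()` on the blob identifier; for example:

```rust
use zksync_da_client::types::DispatchResponse;

fn main() {
    // The blob id returned by the DA layer converts straight into the response type.
    let blob_id = String::from("0xdeadbeef");
    let response: DispatchResponse = blob_id.into();
    assert_eq!(response.blob_id, "0xdeadbeef");
}
```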
#[derive(Default, Serialize)] pub struct InclusionData { diff --git a/core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json b/core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json deleted file mode 100644 index 5e10786c7e3f..000000000000 --- a/core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_patches (minor, patch, recursion_scheduler_level_vk_hash, created_at)\n VALUES\n ($1, $2, $3, NOW())\n ON CONFLICT DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Int4", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd" -} diff --git a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json b/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json deleted file mode 100644 index 3baa610d7d78..000000000000 --- a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n genesis\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "genesis", - "type_info": "Jsonb" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true - ] - }, - "hash": "14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542" -} diff --git a/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json new file mode 100644 index 000000000000..28a1e54230d8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n genesis,\n global_config\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "genesis", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "global_config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true + ] + }, + "hash": "17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634" +} diff --git a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json similarity index 51% rename from core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json rename to core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json index 38b88c316eef..3817369ecc16 100644 --- a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json +++ b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json @@ -1,15 +1,16 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n consensus_replica_state (fake_key, genesis, state)\n VALUES\n (TRUE, $1, $2)\n ", + "query": "\n INSERT INTO\n consensus_replica_state (fake_key, global_config, genesis, state)\n VALUES\n (TRUE, $1, $2, $3)\n ", "describe": { "columns": [], "parameters": { "Left": [ + "Jsonb", "Jsonb", "Jsonb" ] }, "nullable": [] }, 
- "hash": "f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975" + "hash": "1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85" } diff --git a/core/lib/dal/.sqlx/query-d8bc4af72e3d94df53967c83d577a1e1abf3d268b16498cc65758af66781cbb6.json b/core/lib/dal/.sqlx/query-2169cc7dfb074566ceb0bd5754d6e7a755c4bc6c25c7e6caff5fd6142813d349.json similarity index 54% rename from core/lib/dal/.sqlx/query-d8bc4af72e3d94df53967c83d577a1e1abf3d268b16498cc65758af66781cbb6.json rename to core/lib/dal/.sqlx/query-2169cc7dfb074566ceb0bd5754d6e7a755c4bc6c25c7e6caff5fd6142813d349.json index 0ed8005289f7..e48fddcf6175 100644 --- a/core/lib/dal/.sqlx/query-d8bc4af72e3d94df53967c83d577a1e1abf3d268b16498cc65758af66781cbb6.json +++ b/core/lib/dal/.sqlx/query-2169cc7dfb074566ceb0bd5754d6e7a755c4bc6c25c7e6caff5fd6142813d349.json @@ -1,15 +1,16 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, tee_type, status, created_at, updated_at)\n VALUES\n ($1, $2, 'ready_to_be_proven', NOW(), NOW())\n ON CONFLICT (l1_batch_number, tee_type) DO NOTHING\n ", + "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, tee_type, status, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ON CONFLICT (l1_batch_number, tee_type) DO NOTHING\n ", "describe": { "columns": [], "parameters": { "Left": [ "Int8", + "Text", "Text" ] }, "nullable": [] }, - "hash": "d8bc4af72e3d94df53967c83d577a1e1abf3d268b16498cc65758af66781cbb6" + "hash": "2169cc7dfb074566ceb0bd5754d6e7a755c4bc6c25c7e6caff5fd6142813d349" } diff --git a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json similarity index 58% rename from core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json rename to core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json index a42fbe98ff2f..cabe0a3dc557 100644 --- a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json +++ b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510" + "hash": "311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1" } diff --git a/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json b/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json new file mode 100644 index 000000000000..5652e186ceb9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at)\n VALUES\n ($1, $2, $3, NOW())\n ON CONFLICT DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Bytea" + ] 
+ }, + "nullable": [] + }, + "hash": "3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5" +} diff --git a/core/lib/dal/.sqlx/query-414749a3d8d1ac4f2c66b386df47d113257cca7a7fc6c8036b61cc0e005099a8.json b/core/lib/dal/.sqlx/query-414749a3d8d1ac4f2c66b386df47d113257cca7a7fc6c8036b61cc0e005099a8.json new file mode 100644 index 000000000000..e0c5103fac90 --- /dev/null +++ b/core/lib/dal/.sqlx/query-414749a3d8d1ac4f2c66b386df47d113257cca7a7fc6c8036b61cc0e005099a8.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n AND tee_type = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "414749a3d8d1ac4f2c66b386df47d113257cca7a7fc6c8036b61cc0e005099a8" +} diff --git a/core/lib/dal/.sqlx/query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json b/core/lib/dal/.sqlx/query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json deleted file mode 100644 index 7e5f9e1713c4..000000000000 --- a/core/lib/dal/.sqlx/query-47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $1\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $2\n AND (\n proofs.status = 'ready_to_be_proven'\n OR (\n proofs.status = 'picked_by_prover'\n AND proofs.prover_taken_at < NOW() - $3::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $4\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "47975cc0b5e4f3a6b5224cb452b8fee3209a950943dc2b4da82c324e1c09132f" -} diff --git a/core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json b/core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json deleted file mode 100644 index 2d9a24d6d79c..000000000000 --- a/core/lib/dal/.sqlx/query-5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'unpicked',\n updated_at = NOW()\n WHERE\n l1_batch_number = $1\n AND tee_type = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "5c5bdb0e419049f9fb4d8b3bbec468765628fd2c3b7c2a408d18b5aba0df9a30" -} diff --git a/core/lib/dal/.sqlx/query-a8fdcb5180fc5fd125a003e9675f213a0b02b3ff96398920bc0250397bb2a95f.json b/core/lib/dal/.sqlx/query-6292dc157e2b4c64c513b4b8f043ea4423fdaf5d03cb70e8c3a67b6d4a24d29e.json similarity index 53% rename from 
core/lib/dal/.sqlx/query-a8fdcb5180fc5fd125a003e9675f213a0b02b3ff96398920bc0250397bb2a95f.json rename to core/lib/dal/.sqlx/query-6292dc157e2b4c64c513b4b8f043ea4423fdaf5d03cb70e8c3a67b6d4a24d29e.json index 8b67041427d3..62b1be92c909 100644 --- a/core/lib/dal/.sqlx/query-a8fdcb5180fc5fd125a003e9675f213a0b02b3ff96398920bc0250397bb2a95f.json +++ b/core/lib/dal/.sqlx/query-6292dc157e2b4c64c513b4b8f043ea4423fdaf5d03cb70e8c3a67b6d4a24d29e.json @@ -1,10 +1,11 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n tee_type = $1,\n status = 'generated',\n pubkey = $2,\n signature = $3,\n proof = $4,\n updated_at = NOW()\n WHERE\n l1_batch_number = $5\n ", + "query": "\n UPDATE tee_proof_generation_details\n SET\n tee_type = $1,\n status = $2,\n pubkey = $3,\n signature = $4,\n proof = $5,\n updated_at = NOW()\n WHERE\n l1_batch_number = $6\n ", "describe": { "columns": [], "parameters": { "Left": [ + "Text", "Text", "Bytea", "Bytea", @@ -14,5 +15,5 @@ }, "nullable": [] }, - "hash": "a8fdcb5180fc5fd125a003e9675f213a0b02b3ff96398920bc0250397bb2a95f" + "hash": "6292dc157e2b4c64c513b4b8f043ea4423fdaf5d03cb70e8c3a67b6d4a24d29e" } diff --git a/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json new file mode 100644 index 000000000000..ec17f2e0b61b --- /dev/null +++ b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n attesters\n FROM\n l1_batches_consensus_committees\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "attesters", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1" +} diff --git a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json b/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json deleted file mode 100644 index 5130763af73c..000000000000 --- a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(l1_batch_number) AS \"number\"\n FROM\n l1_batches_consensus\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - null - ] - }, - "hash": "849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c" -} diff --git a/core/lib/dal/.sqlx/query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json b/core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json similarity index 70% rename from core/lib/dal/.sqlx/query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json rename to core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json index 68b595b50274..3297d411d8a7 100644 --- a/core/lib/dal/.sqlx/query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json +++ b/core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n 
protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.recursion_scheduler_level_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -30,7 +30,7 @@ }, { "ordinal": 5, - "name": "recursion_scheduler_level_vk_hash", + "name": "snark_wrapper_vk_hash", "type_info": "Bytea" } ], @@ -48,5 +48,5 @@ false ] }, - "hash": "e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526" + "hash": "85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc" } diff --git a/core/lib/dal/.sqlx/query-e048951ded9e4a4a28238334bc4dc118360ab83bae3196ec941216901be629da.json b/core/lib/dal/.sqlx/query-86dbcf93abdd4206d2d62b140cf5dd11537925d02f5a7f2bae23c8dc48204e3f.json similarity index 77% rename from core/lib/dal/.sqlx/query-e048951ded9e4a4a28238334bc4dc118360ab83bae3196ec941216901be629da.json rename to core/lib/dal/.sqlx/query-86dbcf93abdd4206d2d62b140cf5dd11537925d02f5a7f2bae23c8dc48204e3f.json index 70f7f9d12fa4..42cf55bd939e 100644 --- a/core/lib/dal/.sqlx/query-e048951ded9e4a4a28238334bc4dc118360ab83bae3196ec941216901be629da.json +++ b/core/lib/dal/.sqlx/query-86dbcf93abdd4206d2d62b140cf5dd11537925d02f5a7f2bae23c8dc48204e3f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = 'ready_to_be_proven'\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = $2\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -24,12 +24,13 @@ ] } } - } + }, + "Text" ] }, "nullable": [ false ] }, - "hash": "e048951ded9e4a4a28238334bc4dc118360ab83bae3196ec941216901be629da" + "hash": "86dbcf93abdd4206d2d62b140cf5dd11537925d02f5a7f2bae23c8dc48204e3f" } diff --git a/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json new file mode 100644 index 000000000000..a59468bd516c --- /dev/null +++ b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number\n FROM\n l1_batches_consensus\n ORDER BY\n l1_batch_number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97" +} diff --git 
a/core/lib/dal/.sqlx/query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json b/core/lib/dal/.sqlx/query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json similarity index 64% rename from core/lib/dal/.sqlx/query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json rename to core/lib/dal/.sqlx/query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json index 32a9955cc270..ac10e8b1a8f0 100644 --- a/core/lib/dal/.sqlx/query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json +++ b/core/lib/dal/.sqlx/query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n patch\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND recursion_scheduler_level_vk_hash = $2\n ORDER BY\n patch DESC\n ", + "query": "\n SELECT\n patch\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND snark_wrapper_vk_hash = $2\n ORDER BY\n patch DESC\n ", "describe": { "columns": [ { @@ -19,5 +19,5 @@ false ] }, - "hash": "6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9" + "hash": "a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b" } diff --git a/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json new file mode 100644 index 000000000000..356fd8e9d999 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches_consensus_committees (l1_batch_number, attesters, updated_at)\n VALUES\n ($1, $2, NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = $1,\n attesters = $2,\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7" +} diff --git a/core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json b/core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json deleted file mode 100644 index 0fd16adc474d..000000000000 --- a/core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n recursion_scheduler_level_vk_hash\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND patch = $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "recursion_scheduler_level_vk_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int4", - "Int4" - ] - }, - "nullable": [ - false - ] - }, - "hash": "d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85" -} diff --git a/core/lib/dal/.sqlx/query-e3f4af790fca9149f4edff070170294ec464d184ad732692faa61d2ef99c84e9.json b/core/lib/dal/.sqlx/query-e3f4af790fca9149f4edff070170294ec464d184ad732692faa61d2ef99c84e9.json new file mode 100644 index 000000000000..abe74036f4c6 --- /dev/null +++ b/core/lib/dal/.sqlx/query-e3f4af790fca9149f4edff070170294ec464d184ad732692faa61d2ef99c84e9.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $2\n AND l1_batch_number = (\n SELECT\n 
proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $3\n AND (\n proofs.status = $4\n OR (\n proofs.status = $1\n AND proofs.prover_taken_at < NOW() - $5::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $6\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + { + "Custom": { + "name": "tee_verifier_input_producer_job_status", + "kind": { + "Enum": [ + "Queued", + "ManuallySkipped", + "InProgress", + "Successful", + "Failed" + ] + } + } + }, + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "e3f4af790fca9149f4edff070170294ec464d184ad732692faa61d2ef99c84e9" +} diff --git a/core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json b/core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json new file mode 100644 index 000000000000..fa47ccab50ab --- /dev/null +++ b/core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n snark_wrapper_vk_hash\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND patch = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snark_wrapper_vk_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25" +} diff --git a/core/lib/dal/doc/TeeProofGenerationDal.md b/core/lib/dal/doc/TeeProofGenerationDal.md index 167e6b3c42ce..fcfa379816c7 100644 --- a/core/lib/dal/doc/TeeProofGenerationDal.md +++ b/core/lib/dal/doc/TeeProofGenerationDal.md @@ -11,11 +11,9 @@ title: Status Diagram --- stateDiagram-v2 -[*] --> ready_to_be_proven : insert_tee_proof_generation_job -ready_to_be_proven --> picked_by_prover : lock_batch_for_proving +[*] --> unpicked : insert_tee_proof_generation_job +unpicked --> picked_by_prover : lock_batch_for_proving picked_by_prover --> generated : save_proof_artifacts_metadata -generated --> [*] - picked_by_prover --> unpicked : unlock_batch -unpicked --> [*] +generated --> [*] ``` diff --git a/core/lib/dal/migrations/20240828130000_tee_unpicked_status.down.sql b/core/lib/dal/migrations/20240828130000_tee_unpicked_status.down.sql new file mode 100644 index 000000000000..84d806c91287 --- /dev/null +++ b/core/lib/dal/migrations/20240828130000_tee_unpicked_status.down.sql @@ -0,0 +1,3 @@ +UPDATE tee_proof_generation_details +SET status = 'ready_to_be_proven' +WHERE status = 'unpicked'; diff --git a/core/lib/dal/migrations/20240828130000_tee_unpicked_status.up.sql b/core/lib/dal/migrations/20240828130000_tee_unpicked_status.up.sql new file mode 100644 index 000000000000..46b34c8d1485 --- /dev/null +++ b/core/lib/dal/migrations/20240828130000_tee_unpicked_status.up.sql @@ -0,0 +1,3 @@ +UPDATE tee_proof_generation_details +SET status = 'unpicked' +WHERE status = 'ready_to_be_proven'; diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql new file mode 100644 index 
000000000000..fee0b42079f3 --- /dev/null +++ b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE consensus_replica_state DROP COLUMN global_config; + +DROP TABLE l1_batches_consensus_committees; diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql new file mode 100644 index 000000000000..c31952b96465 --- /dev/null +++ b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE consensus_replica_state + ADD COLUMN global_config JSONB NULL; + +CREATE TABLE l1_batches_consensus_committees ( + l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + attesters JSONB NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql new file mode 100644 index 000000000000..daa108d4ff39 --- /dev/null +++ b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql @@ -0,0 +1,3 @@ +UPDATE protocol_patches SET recursion_scheduler_level_vk_hash = snark_wrapper_vk_hash WHERE recursion_scheduler_level_vk_hash = ''::bytea; +ALTER TABLE protocol_patches DROP COLUMN snark_wrapper_vk_hash; +ALTER TABLE protocol_patches ALTER COLUMN recursion_scheduler_level_vk_hash DROP DEFAULT; diff --git a/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql new file mode 100644 index 000000000000..730b3a50d8a0 --- /dev/null +++ b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE protocol_patches ADD COLUMN snark_wrapper_vk_hash BYTEA NOT NULL DEFAULT ''::bytea; +ALTER TABLE protocol_patches ALTER COLUMN recursion_scheduler_level_vk_hash SET DEFAULT ''::bytea; +UPDATE protocol_patches SET snark_wrapper_vk_hash = recursion_scheduler_level_vk_hash; +-- Default was only needed to migrate old rows, we don't want this field to be forgotten by accident after migration. +ALTER TABLE protocol_patches ALTER COLUMN snark_wrapper_vk_hash DROP DEFAULT; + +-- Old column should be removed once the migration is on the mainnet. +COMMENT ON COLUMN protocol_patches.recursion_scheduler_level_vk_hash IS 'This column is deprecated and will be removed in the future. Use snark_wrapper_vk_hash instead.'; diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 658da6c76821..f0ef336bc543 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -22,6 +22,36 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::models::{parse_h160, parse_h256}; +/// Global config of the consensus. 
+#[derive(Debug, PartialEq, Clone)] +pub struct GlobalConfig { + pub genesis: validator::Genesis, + pub registry_address: Option<ethabi::Address>, +} + +impl ProtoFmt for GlobalConfig { + type Proto = proto::GlobalConfig; + + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + registry_address: r + .registry_address + .as_ref() + .map(|a| parse_h160(a)) + .transpose() + .context("registry_address")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + } + } +} + /// Global attestation status served by /// `attestationStatus` RPC. #[derive(Debug, PartialEq, Clone)] @@ -469,3 +499,24 @@ impl ProtoRepr for proto::Transaction { } } } + +impl ProtoRepr for proto::AttesterCommittee { + type Type = attester::Committee; + + fn read(&self) -> anyhow::Result<Self::Type> { + let members: Vec<_> = self + .members + .iter() + .enumerate() + .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) + .collect::<Result<_, _>>() + .context("members")?; + Self::Type::new(members) + } + + fn build(this: &Self::Type) -> Self { + Self { + members: this.iter().map(|x| x.build()).collect(), + } + } +} diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index ea0c12f1b5f3..da9151f10f4d 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package zksync.dal; import "zksync/roles/validator.proto"; +import "zksync/roles/attester.proto"; message Payload { // zksync-era ProtocolVersionId @@ -117,6 +118,15 @@ message PaymasterParams { optional bytes paymaster_input = 2; // required } +message AttesterCommittee { + repeated roles.attester.WeightedAttester members = 1; // required +} + +message GlobalConfig { + optional roles.validator.Genesis genesis = 1; // required + optional bytes registry_address = 2; // optional; H160 +} + message AttestationStatus { optional roles.validator.GenesisHash genesis = 1; // required optional uint64 next_batch_to_attest = 2; // required diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 8f05cb381777..2dca58e2a6a6 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,5 +1,4 @@ use anyhow::Context as _; -use bigdecimal::Zero as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BlockStoreState, ReplicaState}; use zksync_db_connection::{ @@ -7,10 +6,10 @@ use zksync_db_connection::{ error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; -use zksync_protobuf::ProtoFmt as _; +use zksync_protobuf::ProtoRepr as _; use zksync_types::L2BlockNumber; -pub use crate::consensus::{AttestationStatus, Payload}; +pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload}; use crate::{Core, CoreDal}; /// Storage access methods for `zksync_core::consensus` module. @@ -33,72 +32,77 @@ pub enum InsertCertificateError { } impl ConsensusDal<'_, '_> { - /// Fetches genesis. - pub async fn genesis(&mut self) -> DalResult<Option<validator::Genesis>> { - Ok(sqlx::query!( + /// Fetch consensus global config. + pub async fn global_config(&mut self) -> anyhow::Result<Option<GlobalConfig>> { + // global_config contains a superset of genesis information. + // genesis column is deprecated and will be removed once the main node + // is fully upgraded. + // For now we keep the information between both columns in sync.
+ let Some(row) = sqlx::query!( r#" SELECT - genesis + genesis, + global_config FROM consensus_replica_state WHERE fake_key "# ) - .try_map(|row| { - let Some(genesis) = row.genesis else { - return Ok(None); - }; - // Deserialize the json, but don't allow for unknown fields. - // We might encounter an unknown fields here in case if support for the previous - // consensus protocol version is removed before the migration to a new version - // is performed. The node should NOT operate in such a state. - Ok(Some( - validator::GenesisRaw::read( - &zksync_protobuf::serde::deserialize_proto_with_options( - &genesis, /*deny_unknown_fields=*/ true, - ) - .decode_column("genesis")?, - ) - .decode_column("genesis")? - .with_hash(), - )) - }) - .instrument("genesis") + .instrument("global_config") .fetch_optional(self.storage) .await? - .flatten()) + else { + return Ok(None); + }; + if let Some(global_config) = row.global_config { + return Ok(Some( + zksync_protobuf::serde::deserialize(&global_config).context("global_config")?, + )); + } + if let Some(genesis) = row.genesis { + let genesis: validator::Genesis = + zksync_protobuf::serde::deserialize(&genesis).context("genesis")?; + return Ok(Some(GlobalConfig { + genesis, + registry_address: None, + })); + } + Ok(None) } - /// Attempts to update the genesis. + /// Attempts to update the global config. /// Fails if the new genesis is invalid. /// Fails if the new genesis has different `chain_id`. /// Fails if the storage contains a newer genesis (higher fork number). - /// Noop if the new genesis is the same as the current one. + /// Noop if the new global config is the same as the current one. /// Resets the stored consensus state otherwise and purges all certificates. - pub async fn try_update_genesis(&mut self, genesis: &validator::Genesis) -> anyhow::Result<()> { + pub async fn try_update_global_config(&mut self, want: &GlobalConfig) -> anyhow::Result<()> { let mut txn = self.storage.start_transaction().await?; - if let Some(got) = txn.consensus_dal().genesis().await? { + if let Some(got) = txn.consensus_dal().global_config().await? { // Exit if the genesis didn't change. 
- if &got == genesis { + if &got == want { return Ok(()); } anyhow::ensure!( - got.chain_id == genesis.chain_id, + got.genesis.chain_id == want.genesis.chain_id, "changing chain_id is not allowed: old = {:?}, new = {:?}", - got.chain_id, - genesis.chain_id, + got.genesis.chain_id, + want.genesis.chain_id, ); anyhow::ensure!( - got.fork_number < genesis.fork_number, + got.genesis.fork_number < want.genesis.fork_number, "transition to a past fork is not allowed: old = {:?}, new = {:?}", - got.fork_number, - genesis.fork_number, + got.genesis.fork_number, + want.genesis.fork_number, ); - genesis.verify().context("genesis.verify()")?; + want.genesis.verify().context("genesis.verify()")?; } let genesis = - zksync_protobuf::serde::serialize(genesis, serde_json::value::Serializer).unwrap(); + zksync_protobuf::serde::serialize(&want.genesis, serde_json::value::Serializer) + .unwrap(); + let global_config = + zksync_protobuf::serde::serialize(want, serde_json::value::Serializer).unwrap(); let state = zksync_protobuf::serde::serialize( &ReplicaState::default(), serde_json::value::Serializer, @@ -131,14 +135,15 @@ impl ConsensusDal<'_, '_> { sqlx::query!( r#" INSERT INTO - consensus_replica_state (fake_key, genesis, state) + consensus_replica_state (fake_key, global_config, genesis, state) VALUES - (TRUE, $1, $2) + (TRUE, $1, $2, $3) "#, + global_config, genesis, state, ) - .instrument("try_update_genesis#INSERT INTO consenuss_replica_state") + .instrument("try_update_global_config#INSERT INTO consensus_replica_state") .execute(&mut txn) .await?; txn.commit().await?; @@ -154,25 +159,33 @@ impl ConsensusDal<'_, '_> { .start_transaction() .await .context("start_transaction")?; - let Some(old) = txn.consensus_dal().genesis().await.context("genesis()")? else { + let Some(old) = txn + .consensus_dal() + .global_config() + .await + .context("global_config()")? + else { return Ok(()); }; - let new = validator::GenesisRaw { - chain_id: old.chain_id, - fork_number: old.fork_number.next(), - first_block: txn - .consensus_dal() - .next_block() - .await - .context("next_block()")?, - - protocol_version: old.protocol_version, - validators: old.validators.clone(), - attesters: old.attesters.clone(), - leader_selection: old.leader_selection.clone(), - } - .with_hash(); - txn.consensus_dal().try_update_genesis(&new).await?; + let new = GlobalConfig { + genesis: validator::GenesisRaw { + chain_id: old.genesis.chain_id, + fork_number: old.genesis.fork_number.next(), + first_block: txn + .consensus_dal() + .next_block() + .await + .context("next_block()")?, + + protocol_version: old.genesis.protocol_version, + validators: old.genesis.validators.clone(), + attesters: old.genesis.attesters.clone(), + leader_selection: old.genesis.leader_selection.clone(), + } + .with_hash(), + registry_address: old.registry_address, + }; + txn.consensus_dal().try_update_global_config(&new).await?; txn.commit().await?; Ok(()) } @@ -259,7 +272,12 @@ impl ConsensusDal<'_, '_> { /// so it might NOT be the certificate for the last L2 block. pub async fn block_certificates_range(&mut self) -> anyhow::Result<BlockStoreState> { // It cannot be older than genesis first block. - let mut start = self.genesis().await?.context("genesis()")?.first_block; + let mut start = self + .global_config() + .await? + .context("genesis()")? + .genesis + .first_block; start = start.max(self.first_block().await.context("first_block()")?); let row = sqlx::query!( r#" @@ -422,21 +440,96 @@ impl ConsensusDal<'_, '_> { Ok(()) } + /// Persist the attester committee for the given batch.
+ pub async fn upsert_attester_committee( + &mut self, + number: attester::BatchNumber, + committee: &attester::Committee, + ) -> anyhow::Result<()> { + let committee = proto::AttesterCommittee::build(committee); + let committee = + zksync_protobuf::serde::serialize_proto(&committee, serde_json::value::Serializer) + .unwrap(); + sqlx::query!( + r#" + INSERT INTO + l1_batches_consensus_committees (l1_batch_number, attesters, updated_at) + VALUES + ($1, $2, NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET + l1_batch_number = $1, + attesters = $2, + updated_at = NOW() + "#, + i64::try_from(number.0).context("overflow")?, + committee + ) + .instrument("upsert_attester_committee") + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } + + /// Fetches the attester committee for the L1 batch with the given number. + pub async fn attester_committee( + &mut self, + n: attester::BatchNumber, + ) -> anyhow::Result<Option<attester::Committee>> { + let Some(row) = sqlx::query!( + r#" + SELECT + attesters + FROM + l1_batches_consensus_committees + WHERE + l1_batch_number = $1 + "#, + i64::try_from(n.0)? + ) + .instrument("attester_committee") + .report_latency() + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + let raw = zksync_protobuf::serde::deserialize_proto(&row.attesters) + .context("deserialize_proto()")?; + Ok(Some( + proto::AttesterCommittee::read(&raw).context("read()")?, + )) + } + /// Inserts a certificate for the L1 batch. /// Noop if a certificate for the same L1 batch is already present. - /// No verification is performed - it cannot be performed due to circular dependency on + /// Verification against previously stored attester committee is performed. + /// Batch hash is not verified - it cannot be performed due to circular dependency on /// `zksync_l1_contract_interface`. pub async fn insert_batch_certificate( &mut self, cert: &attester::BatchQC, ) -> anyhow::Result<()> { - let res = sqlx::query!( + let cfg = self + .global_config() + .await + .context("global_config()")? + .context("genesis is missing")?; + let committee = self + .attester_committee(cert.message.number) + .await + .context("attester_committee()")? + .context("attester committee is missing")?; + cert.verify(cfg.genesis.hash(), &committee) + .context("cert.verify()")?; + sqlx::query!( r#" INSERT INTO - l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at) + l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at) VALUES ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::try_from(cert.message.number.0).context("overflow")?, // Unwrap is ok, because serialization should always succeed. @@ -446,9 +539,6 @@ impl ConsensusDal<'_, '_> { .report_latency() .execute(self.storage) .await?; - if res.rows_affected().is_zero() { - tracing::debug!(l1_batch_number = ?cert.message.number, "duplicate batch certificate"); - } Ok(()) } @@ -457,24 +547,28 @@ impl ConsensusDal<'_, '_> { pub async fn last_batch_certificate_number( &mut self, ) -> anyhow::Result<Option<attester::BatchNumber>> { - let row = sqlx::query!( + let Some(row) = sqlx::query!( r#" SELECT - MAX(l1_batch_number) AS "number" + l1_batch_number FROM l1_batches_consensus + ORDER BY + l1_batch_number DESC + LIMIT + 1 "# ) .instrument("last_batch_certificate_number") .report_latency() - .fetch_one(self.storage) - .await?; - - let Some(n) = row.number else { + .fetch_optional(self.storage) + .await?
+ else { return Ok(None); }; + Ok(Some(attester::BatchNumber( - n.try_into().context("overflow")?, + row.l1_batch_number.try_into().context("overflow")?, ))) } @@ -529,7 +623,7 @@ impl ConsensusDal<'_, '_> { /// This is a main node only query. /// ENs should call the attestation_status RPC of the main node. pub async fn attestation_status(&mut self) -> anyhow::Result<Option<AttestationStatus>> { - let Some(genesis) = self.genesis().await.context("genesis()")? else { + let Some(cfg) = self.global_config().await.context("genesis()")? else { return Ok(None); }; let Some(next_batch_to_attest) = async { @@ -542,18 +636,21 @@ impl ConsensusDal<'_, '_> { return Ok(Some(last + 1)); } // Otherwise start with the batch containing the first block of the fork. - self.batch_of_block(genesis.first_block) + self.batch_of_block(cfg.genesis.first_block) .await .context("batch_of_block()") } .await? else { - tracing::info!(%genesis.first_block, "genesis block not found"); + tracing::info!(%cfg.genesis.first_block, "genesis block not found"); return Ok(None); }; Ok(Some(AttestationStatus { - genesis: genesis.hash(), - next_batch_to_attest, + genesis: cfg.genesis.hash(), + // We never attest batch 0 for technical reasons: + // * it is not supported to read state before batch 0. + // * the registry contract needs to be deployed before we can start operating on it + next_batch_to_attest: next_batch_to_attest.max(attester::BatchNumber(1)), })) } } @@ -563,8 +660,9 @@ mod tests { use rand::Rng as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::ReplicaState; - use zksync_types::{L1BatchNumber, ProtocolVersion}; + use zksync_types::ProtocolVersion; + use super::GlobalConfig; use crate::{ tests::{create_l1_batch_header, create_l2_block_header}, ConnectionPool, Core, CoreDal, }; @@ -575,19 +673,22 @@ let rng = &mut rand::thread_rng(); let pool = ConnectionPool::<Core>::test_pool().await; let mut conn = pool.connection().await.unwrap(); - assert_eq!(None, conn.consensus_dal().genesis().await.unwrap()); + assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); for n in 0..3 { let setup = validator::testonly::Setup::new(rng, 3); let mut genesis = (*setup.genesis).clone(); genesis.fork_number = validator::ForkNumber(n); - let genesis = genesis.with_hash(); + let cfg = GlobalConfig { + genesis: genesis.with_hash(), + registry_address: Some(rng.gen()), + }; conn.consensus_dal() - .try_update_genesis(&genesis) + .try_update_global_config(&cfg) .await .unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!( ReplicaState::default(), @@ -597,8 +698,8 @@ let want: ReplicaState = rng.gen(); conn.consensus_dal().set_replica_state(&want).await.unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); } } #[tokio::test] async fn test_batch_certificate() { let rng = &mut rand::thread_rng(); + let setup = validator::testonly::Setup::new(rng, 3); let pool = ConnectionPool::<Core>::test_pool().await; let mut conn = pool.connection().await.unwrap(); + let cfg = GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: Some(rng.gen()), + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); - let mut mock_batch_qc = |number: L1BatchNumber| { -
let mut cert: attester::BatchQC = rng.gen(); - cert.message.number.0 = u64::from(number.0); - cert.signatures.add(rng.gen(), rng.gen()); - cert + let mut make_cert = |number: attester::BatchNumber| { + let m = attester::Batch { + genesis: setup.genesis.hash(), + hash: rng.gen(), + number, + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } + attester::BatchQC { + message: m, + signatures: sigs, + } }; // Required for inserting l2 blocks @@ -627,8 +746,7 @@ mod tests { // Insert some mock L2 blocks and L1 batches let mut block_number = 0; let mut batch_number = 0; - let num_batches = 3; - for _ in 0..num_batches { + for _ in 0..3 { for _ in 0..3 { block_number += 1; let l2_block = create_l2_block_header(block_number); @@ -636,64 +754,56 @@ mod tests { } batch_number += 1; let l1_batch = create_l1_batch_header(batch_number); - conn.blocks_dal() .insert_mock_l1_batch(&l1_batch) .await .unwrap(); - conn.blocks_dal() .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) .await .unwrap(); } - let l1_batch_number = L1BatchNumber(batch_number); + let n = attester::BatchNumber(batch_number.into()); // Insert a batch certificate for the last L1 batch. - let cert1 = mock_batch_qc(l1_batch_number); - + let want = make_cert(n); conn.consensus_dal() - .insert_batch_certificate(&cert1) + .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) .await .unwrap(); - - // Try insert duplicate batch certificate for the same batch. - let cert2 = mock_batch_qc(l1_batch_number); - conn.consensus_dal() - .insert_batch_certificate(&cert2) + .insert_batch_certificate(&want) .await .unwrap(); + // Reinserting a cert should fail. + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n)) + .await + .is_err()); + // Retrieve the latest certificate. - let number = conn + let got_n = conn .consensus_dal() .last_batch_certificate_number() .await .unwrap() .unwrap(); - - let cert = conn + let got = conn .consensus_dal() - .batch_certificate(number) + .batch_certificate(got_n) .await .unwrap() .unwrap(); - - assert_eq!(cert, cert1, "duplicates are ignored"); + assert_eq!(got, want); // Try insert batch certificate for non-existing batch - let cert3 = mock_batch_qc(l1_batch_number.next()); - conn.consensus_dal() - .insert_batch_certificate(&cert3) - .await - .expect_err("missing payload"); - - // Insert one more L1 batch without a certificate. 
- conn.blocks_dal() - .insert_mock_l1_batch(&create_l1_batch_header(batch_number + 1)) + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n.next())) .await - .unwrap(); + .is_err()); } } diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index c76547422d8f..2266d6fb60f9 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -413,8 +413,8 @@ impl EthSenderDal<'_, '_> { WHERE id = $2 "#, - eth_tx_id as i32, chain_id as i64, + eth_tx_id as i32, ) .execute(self.storage.conn()) .await?; diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index a9690dcb7993..479649f85092 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -15,7 +15,7 @@ pub mod storage_sync; pub mod storage_tee_proof; pub mod storage_transaction; pub mod storage_verification_request; -pub mod storage_witness_job_info; + #[cfg(test)] mod tests; diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index c19fa560b67c..e53bf7b9d0a4 100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -13,7 +13,7 @@ pub struct StorageProtocolVersion { pub minor: i32, pub patch: i32, pub timestamp: i64, - pub recursion_scheduler_level_vk_hash: Vec<u8>, + pub snark_wrapper_vk_hash: Vec<u8>, pub bootloader_code_hash: Vec<u8>, pub default_account_code_hash: Vec<u8>, } @@ -29,9 +29,7 @@ pub(crate) fn protocol_version_from_storage( }, timestamp: storage_version.timestamp as u64, l1_verifier_config: L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &storage_version.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&storage_version.snark_wrapper_vk_hash), }, base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: H256::from_slice(&storage_version.bootloader_code_hash), diff --git a/core/lib/dal/src/models/storage_witness_job_info.rs b/core/lib/dal/src/models/storage_witness_job_info.rs deleted file mode 100644 index 03d1120b7170..000000000000 --- a/core/lib/dal/src/models/storage_witness_job_info.rs +++ /dev/null @@ -1,78 +0,0 @@ -use std::{convert::TryFrom, str::FromStr}; - -use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; -use zksync_types::{ - basic_fri_types::AggregationRound, - prover_dal::{ - JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, - WitnessJobStatusSuccessful, - }, - L1BatchNumber, -}; - -#[derive(sqlx::FromRow)] -pub struct StorageWitnessJobInfo { - pub aggregation_round: i32, - pub l1_batch_number: i64, - pub status: String, - pub error: Option<String>, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, - pub time_taken: Option<NaiveTime>, - pub processing_started_at: Option<NaiveDateTime>, - pub attempts: i32, -} - -impl From<StorageWitnessJobInfo> for WitnessJobInfo { - fn from(x: StorageWitnessJobInfo) -> Self { - fn nt2d(nt: NaiveDateTime) -> DateTime<Utc> { - DateTime::from_naive_utc_and_offset(nt, Utc) - } - - let status = - match WitnessJobStatus::from_str(x.status.as_str()) - .unwrap_or_else(|_| panic!("Unknown value '{}' in witness job status db record.", x.status)) { - WitnessJobStatus::Successful(_) => WitnessJobStatus::Successful(WitnessJobStatusSuccessful { - started_at: - nt2d(x.processing_started_at - .unwrap_or_else(|| panic!( - "Witness job is successful but lacks processing timestamp.
Batch:round {}:{} ", - x.l1_batch_number, - x.aggregation_round))), - time_taken: x.time_taken.unwrap() - NaiveTime::from_hms_opt(0,0,0).unwrap() - }), - WitnessJobStatus::Failed(_) => { - let batch = x.l1_batch_number; - let round = x.aggregation_round; - - WitnessJobStatus::Failed( - WitnessJobStatusFailed { - started_at: - nt2d(x.processing_started_at - .unwrap_or_else(|| panic!( - "Witness job is failed but lacks processing timestamp. Batch:round {}:{} ", - x.l1_batch_number, - x.aggregation_round))), - error: - x.error - .unwrap_or_else(|| panic!( - "Witness job failed but lacks error message. Batch:round {}:{}", - batch, - round)), - }) - }, - x => x - }; - - WitnessJobInfo { - block_number: L1BatchNumber(x.l1_batch_number as u32), - created_at: nt2d(x.created_at), - updated_at: nt2d(x.updated_at), - status, - position: JobPosition { - aggregation_round: AggregationRound::try_from(x.aggregation_round).unwrap(), - sequence_number: 1, // Witness job 1:1 aggregation round, per block - }, - } - } -} diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 0d17044e6c51..8cb5094fd49e 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -71,16 +71,14 @@ impl ProtocolVersionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - protocol_patches (minor, patch, recursion_scheduler_level_vk_hash, created_at) + protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at) VALUES ($1, $2, $3, NOW()) ON CONFLICT DO NOTHING "#, version.minor as i32, version.patch.0 as i32, - l1_verifier_config - .recursion_scheduler_level_vk_hash - .as_bytes(), + l1_verifier_config.snark_wrapper_vk_hash.as_bytes(), ) .instrument("save_protocol_version#patch") .with_arg("version", &version) @@ -235,7 +233,7 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions.bootloader_code_hash, protocol_versions.default_account_code_hash, protocol_patches.patch, - protocol_patches.recursion_scheduler_level_vk_hash + protocol_patches.snark_wrapper_vk_hash FROM protocol_versions JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id @@ -268,7 +266,7 @@ impl ProtocolVersionsDal<'_, '_> { let row = sqlx::query!( r#" SELECT - recursion_scheduler_level_vk_hash + snark_wrapper_vk_hash FROM protocol_patches WHERE @@ -282,16 +280,14 @@ impl ProtocolVersionsDal<'_, '_> { .await .unwrap()?; Some(L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &row.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&row.snark_wrapper_vk_hash), }) } pub async fn get_patch_versions_for_vk( &mut self, minor_version: ProtocolVersionId, - recursion_scheduler_level_vk_hash: H256, + snark_wrapper_vk_hash: H256, ) -> DalResult> { let rows = sqlx::query!( r#" @@ -301,12 +297,12 @@ impl ProtocolVersionsDal<'_, '_> { protocol_patches WHERE minor = $1 - AND recursion_scheduler_level_vk_hash = $2 + AND snark_wrapper_vk_hash = $2 ORDER BY patch DESC "#, minor_version as i32, - recursion_scheduler_level_vk_hash.as_bytes() + snark_wrapper_vk_hash.as_bytes() ) .instrument("get_patch_versions_for_vk") .fetch_all(self.storage) diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 80e364273f69..cc6b87a07aca 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -1,6 +1,7 @@ #![doc = include_str!("../doc/TeeProofGenerationDal.md")] use std::time::Duration; +use strum::{Display, EnumString}; use 
zksync_db_connection::{ connection::Connection, error::DalResult, @@ -19,6 +20,16 @@ pub struct TeeProofGenerationDal<'a, 'c> { pub(crate) storage: &'a mut Connection<'c, Core>, } +#[derive(Debug, EnumString, Display)] +enum TeeProofGenerationJobStatus { + #[strum(serialize = "unpicked")] + Unpicked, + #[strum(serialize = "picked_by_prover")] + PickedByProver, + #[strum(serialize = "generated")] + Generated, +} + impl TeeProofGenerationDal<'_, '_> { pub async fn lock_batch_for_proving( &mut self, @@ -32,11 +43,11 @@ impl TeeProofGenerationDal<'_, '_> { r#" UPDATE tee_proof_generation_details SET - status = 'picked_by_prover', + status = $1, updated_at = NOW(), prover_taken_at = NOW() WHERE - tee_type = $1 + tee_type = $2 AND l1_batch_number = ( SELECT proofs.l1_batch_number @@ -44,15 +55,15 @@ impl TeeProofGenerationDal<'_, '_> { tee_proof_generation_details AS proofs JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number WHERE - inputs.status = $2 + inputs.status = $3 AND ( - proofs.status = 'ready_to_be_proven' + proofs.status = $4 OR ( - proofs.status = 'picked_by_prover' - AND proofs.prover_taken_at < NOW() - $3::INTERVAL + proofs.status = $1 + AND proofs.prover_taken_at < NOW() - $5::INTERVAL ) ) - AND proofs.l1_batch_number >= $4 + AND proofs.l1_batch_number >= $6 ORDER BY l1_batch_number ASC LIMIT @@ -63,8 +74,10 @@ impl TeeProofGenerationDal<'_, '_> { RETURNING tee_proof_generation_details.l1_batch_number "#, + TeeProofGenerationJobStatus::PickedByProver.to_string(), tee_type.to_string(), TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, + TeeProofGenerationJobStatus::Unpicked.to_string(), processing_timeout, min_batch_number ); @@ -91,12 +104,13 @@ impl TeeProofGenerationDal<'_, '_> { r#" UPDATE tee_proof_generation_details SET - status = 'unpicked', + status = $1, updated_at = NOW() WHERE - l1_batch_number = $1 - AND tee_type = $2 + l1_batch_number = $2 + AND tee_type = $3 "#, + TeeProofGenerationJobStatus::Unpicked.to_string(), batch_number, tee_type.to_string() ) @@ -117,30 +131,33 @@ impl TeeProofGenerationDal<'_, '_> { signature: &[u8], proof: &[u8], ) -> DalResult<()> { + let batch_number = i64::from(batch_number.0); let query = sqlx::query!( r#" UPDATE tee_proof_generation_details SET tee_type = $1, - status = 'generated', - pubkey = $2, - signature = $3, - proof = $4, + status = $2, + pubkey = $3, + signature = $4, + proof = $5, updated_at = NOW() WHERE - l1_batch_number = $5 + l1_batch_number = $6 "#, tee_type.to_string(), + TeeProofGenerationJobStatus::Generated.to_string(), pubkey, signature, proof, - i64::from(batch_number.0) + batch_number ); let instrumentation = Instrumented::new("save_proof_artifacts_metadata") .with_arg("tee_type", &tee_type) .with_arg("pubkey", &pubkey) .with_arg("signature", &signature) - .with_arg("proof", &proof); + .with_arg("proof", &proof) + .with_arg("l1_batch_number", &batch_number); let result = instrumentation .clone() .with(query) @@ -168,11 +185,12 @@ impl TeeProofGenerationDal<'_, '_> { INSERT INTO tee_proof_generation_details (l1_batch_number, tee_type, status, created_at, updated_at) VALUES - ($1, $2, 'ready_to_be_proven', NOW(), NOW()) + ($1, $2, $3, NOW(), NOW()) ON CONFLICT (l1_batch_number, tee_type) DO NOTHING "#, batch_number, tee_type.to_string(), + TeeProofGenerationJobStatus::Unpicked.to_string(), ); let instrumentation = Instrumented::new("insert_tee_proof_generation_job") .with_arg("l1_batch_number", &batch_number) @@ -229,14 +247,16 @@ impl 
TeeProofGenerationDal<'_, '_> { tee_attestations ta ON tp.pubkey = ta.pubkey WHERE tp.l1_batch_number = $1 - AND tp.status = 'generated' + AND tp.status = $2 {} ORDER BY tp.l1_batch_number ASC, tp.tee_type ASC "#, - tee_type.map_or_else(String::new, |_| "AND tp.tee_type = $2".to_string()) + tee_type.map_or_else(String::new, |_| "AND tp.tee_type = $3".to_string()) ); - let mut query = sqlx::query_as(&query).bind(i64::from(batch_number.0)); + let mut query = sqlx::query_as(&query) + .bind(i64::from(batch_number.0)) + .bind(TeeProofGenerationJobStatus::Generated.to_string()); if let Some(tee_type) = tee_type { query = query.bind(tee_type.to_string()); @@ -257,13 +277,14 @@ impl TeeProofGenerationDal<'_, '_> { JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number WHERE inputs.status = $1 - AND proofs.status = 'ready_to_be_proven' + AND proofs.status = $2 ORDER BY proofs.l1_batch_number ASC LIMIT 1 "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus + TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, + TeeProofGenerationJobStatus::Unpicked.to_string(), ); let batch_number = Instrumented::new("get_oldest_unpicked_batch") .with(query) diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs index 4adee62e7aa6..4a178fd52253 100644 --- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs +++ b/core/lib/dal/src/tee_verifier_input_producer_dal.rs @@ -17,7 +17,7 @@ pub struct TeeVerifierInputProducerDal<'a, 'c> { } /// The amount of attempts to process a job before giving up. -pub const JOB_MAX_ATTEMPT: i16 = 2; +pub const JOB_MAX_ATTEMPT: i16 = 5; /// Time to wait for job to be processed const JOB_PROCESSING_TIMEOUT: PgInterval = pg_interval_from_duration(Duration::from_secs(10 * 60)); diff --git a/core/lib/db_connection/Cargo.toml b/core/lib/db_connection/Cargo.toml index fa5bb0b20af2..fb535d582325 100644 --- a/core/lib/db_connection/Cargo.toml +++ b/core/lib/db_connection/Cargo.toml @@ -12,7 +12,6 @@ categories.workspace = true [dependencies] zksync_basic_types.workspace = true -zksync_health_check.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true diff --git a/core/lib/default_da_clients/README.md b/core/lib/default_da_clients/README.md deleted file mode 100644 index 17ced715b268..000000000000 --- a/core/lib/default_da_clients/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Default DA Clients - -This crate contains the default implementations of the Data Availability clients. Default clients are maintained within -this repo because they are tightly coupled with the codebase, and would cause the circular dependency if they were to be -moved to the [hyperchain-da](https://github.com/matter-labs/hyperchain-da) repository. - -Currently, the following DataAvailability clients are implemented: - -- `NoDA client` that does not send or store any pubdata, it is needed to run the zkSync network in the "no-DA" mode - utilizing the DA framework. -- `Object Store client` that stores the pubdata in the Object Store(GCS). 
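Context for the removals that follow: the `default_da_clients` crate described in the README above is superseded by env-configured DA clients, and the new `core/lib/env_config/src/da_client.rs` added later in this diff selects the implementation from a `DA_CLIENT` tag variable before parsing the remaining `DA_*` variables for that client. Below is a minimal, self-contained sketch of that dispatch pattern; the `Client` enum, field set, and string-based errors are simplified stand-ins for illustration, not the real `DAClient`/`DAClientConfig` types:

use std::env;

#[derive(Debug)]
enum Client {
    // Simplified stand-ins for the real Avail / ObjectStore client configs.
    Avail { api_node_url: String },
    ObjectStore { bucket_base_url: String },
}

fn client_from_env() -> Result<Client, String> {
    // The tag variable decides which client the remaining DA_* variables configure.
    let tag = env::var("DA_CLIENT").map_err(|e| e.to_string())?;
    match tag.as_str() {
        "Avail" => Ok(Client::Avail {
            api_node_url: env::var("DA_API_NODE_URL").map_err(|e| e.to_string())?,
        }),
        "ObjectStore" => Ok(Client::ObjectStore {
            bucket_base_url: env::var("DA_BUCKET_BASE_URL").map_err(|e| e.to_string())?,
        }),
        other => Err(format!("Unknown DA client name: {other}")),
    }
}

fn main() {
    // Mirrors the values used by the from_env_object_store test later in this diff.
    env::set_var("DA_CLIENT", "ObjectStore");
    env::set_var("DA_BUCKET_BASE_URL", "sometestpath");
    println!("{:?}", client_from_env());
}

The real implementation delegates the per-client field parsing to `envy_load` with the `DA_` prefix, which is why the two tests in `da_client.rs` differ only in the environment variables they set.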
diff --git a/core/lib/default_da_clients/src/no_da/mod.rs b/core/lib/default_da_clients/src/no_da/mod.rs deleted file mode 100644 index 814cf30c2cbd..000000000000 --- a/core/lib/default_da_clients/src/no_da/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod client; -pub mod wiring_layer; diff --git a/core/lib/default_da_clients/src/object_store/config.rs b/core/lib/default_da_clients/src/object_store/config.rs deleted file mode 100644 index 285c39827c79..000000000000 --- a/core/lib/default_da_clients/src/object_store/config.rs +++ /dev/null @@ -1,12 +0,0 @@ -use zksync_config::ObjectStoreConfig; -use zksync_env_config::envy_load; - -#[derive(Debug)] -pub struct DAObjectStoreConfig(pub ObjectStoreConfig); - -impl DAObjectStoreConfig { - pub fn from_env() -> anyhow::Result<Self> { - let config = envy_load("object_store", "DA_CLIENT_OBJECT_STORE_")?; - Ok(Self(config)) - } -} diff --git a/core/lib/default_da_clients/src/object_store/mod.rs b/core/lib/default_da_clients/src/object_store/mod.rs deleted file mode 100644 index 1600941b0572..000000000000 --- a/core/lib/default_da_clients/src/object_store/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod client; -pub mod config; -mod types; -pub mod wiring_layer; diff --git a/core/lib/default_da_clients/src/object_store/types.rs b/core/lib/default_da_clients/src/object_store/types.rs deleted file mode 100644 index b8ec9303e71e..000000000000 --- a/core/lib/default_da_clients/src/object_store/types.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::io::{Read, Write}; - -use flate2::{read::GzDecoder, write::GzEncoder, Compression}; -use zksync_object_store::{Bucket, StoredObject, _reexports::BoxedError}; -use zksync_types::L1BatchNumber; - -/// Used as a wrapper for the pubdata to be stored in the GCS. -#[derive(Debug)] -pub struct StorablePubdata { - pub data: Vec<u8>, -} - -impl StoredObject for StorablePubdata { - const BUCKET: Bucket = Bucket::DataAvailability; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("l1_batch_{key}_pubdata.gzip") - } - - fn serialize(&self) -> Result<Vec<u8>, BoxedError> { - let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); - encoder.write_all(&self.data[..])?; - encoder.finish().map_err(From::from) - } - - fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> { - let mut decoder = GzDecoder::new(&bytes[..]); - let mut decompressed_bytes = Vec::new(); - decoder - .read_to_end(&mut decompressed_bytes) - .map_err(BoxedError::from)?; - - Ok(Self { - data: decompressed_bytes, - }) - } -} diff --git a/core/lib/env_config/src/base_token_adjuster.rs b/core/lib/env_config/src/base_token_adjuster.rs index f94e9c8f92a2..5003d5ea5873 100644 --- a/core/lib/env_config/src/base_token_adjuster.rs +++ b/core/lib/env_config/src/base_token_adjuster.rs @@ -28,6 +28,7 @@ mod tests { l1_tx_sending_sleep_ms: 30_000, price_fetching_max_attempts: 20, price_fetching_sleep_ms: 10_000, + l1_update_deviation_percentage: 20, halt_on_error: true, } } @@ -45,6 +46,7 @@ mod tests { l1_tx_sending_sleep_ms: 30_000, price_fetching_max_attempts: 3, price_fetching_sleep_ms: 5_000, + l1_update_deviation_percentage: 10, halt_on_error: false, } } @@ -62,6 +64,7 @@ mod tests { BASE_TOKEN_ADJUSTER_L1_RECEIPT_CHECKING_SLEEP_MS=20000 BASE_TOKEN_ADJUSTER_L1_TX_SENDING_MAX_ATTEMPTS=10 BASE_TOKEN_ADJUSTER_L1_TX_SENDING_SLEEP_MS=30000 + BASE_TOKEN_ADJUSTER_L1_UPDATE_DEVIATION_PERCENTAGE=20 BASE_TOKEN_ADJUSTER_PRICE_FETCHING_MAX_ATTEMPTS=20 BASE_TOKEN_ADJUSTER_PRICE_FETCHING_SLEEP_MS=10000 BASE_TOKEN_ADJUSTER_HALT_ON_ERROR=true @@ -85,6 +88,7 @@ mod tests {
"BASE_TOKEN_ADJUSTER_L1_RECEIPT_CHECKING_SLEEP_MS", "BASE_TOKEN_ADJUSTER_L1_TX_SENDING_MAX_ATTEMPTS", "BASE_TOKEN_ADJUSTER_L1_TX_SENDING_SLEEP_MS", + "BASE_TOKEN_ADJUSTER_L1_UPDATE_DEVIATION_PERCENTAGE", "BASE_TOKEN_ADJUSTER_PRICE_FETCHING_MAX_ATTEMPTS", "BASE_TOKEN_ADJUSTER_PRICE_FETCHING_SLEEP_MS", "BASE_TOKEN_ADJUSTER_HALT_ON_ERROR", diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 3365f56add77..298c43b80ccd 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -89,6 +89,7 @@ CONTRACTS_L2_ERC20_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_WETH_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_TESTNET_PAYMASTER_ADDR="FC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +CONTRACTS_L2_CONSENSUS_REGISTRY_ADDR="D64e136566a9E04eb05B30184fF577F52682D182" CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" CONTRACTS_L1_SHARED_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs new file mode 100644 index 000000000000..f2da3b83f18a --- /dev/null +++ b/core/lib/env_config/src/da_client.rs @@ -0,0 +1,115 @@ +use zksync_config::configs::da_client::{ + DAClient, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, +}; + +use crate::{envy_load, FromEnv}; + +impl FromEnv for DAClientConfig { + fn from_env() -> anyhow::Result { + let client_tag = std::env::var("DA_CLIENT")?; + let client = match client_tag.as_str() { + AVAIL_CLIENT_CONFIG_NAME => DAClient::Avail(envy_load("da_avail_config", "DA_")?), + OBJECT_STORE_CLIENT_CONFIG_NAME => { + DAClient::ObjectStore(envy_load("da_object_store", "DA_")?) 
+ } + _ => anyhow::bail!("Unknown DA client name: {}", client_tag), + }; + + Ok(Self { client }) + } +} + +#[cfg(test)] +mod tests { + use zksync_config::{ + configs::{ + da_client::{DAClient, DAClient::ObjectStore}, + object_store::ObjectStoreMode::GCS, + }, + AvailConfig, DAClientConfig, ObjectStoreConfig, + }; + + use super::*; + use crate::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + fn expected_object_store_da_client_config(url: String, max_retries: u16) -> DAClientConfig { + DAClientConfig { + client: ObjectStore(ObjectStoreConfig { + mode: GCS { + bucket_base_url: url, + }, + max_retries, + local_mirror_path: None, + }), + } + } + + #[test] + fn from_env_object_store() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="ObjectStore" + + DA_BUCKET_BASE_URL="sometestpath" + DA_MODE="GCS" + DA_MAX_RETRIES="5" + "#; + lock.set_env(config); + let actual = DAClientConfig::from_env().unwrap(); + assert_eq!( + actual, + expected_object_store_da_client_config("sometestpath".to_string(), 5) + ); + } + + fn expected_avail_da_layer_config( + api_node_url: &str, + bridge_api_url: &str, + seed: &str, + app_id: u32, + timeout: usize, + max_retries: usize, + ) -> DAClientConfig { + DAClientConfig { + client: DAClient::Avail(AvailConfig { + api_node_url: api_node_url.to_string(), + bridge_api_url: bridge_api_url.to_string(), + seed: seed.to_string(), + app_id, + timeout, + max_retries, + }), + } + } + + #[test] + fn from_env_avail_client() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="Avail" + DA_API_NODE_URL="localhost:12345" + DA_BRIDGE_API_URL="localhost:54321" + DA_SEED="bottom drive obey lake curtain smoke basket hold race lonely fit walk" + DA_APP_ID="1" + DA_TIMEOUT="2" + DA_MAX_RETRIES="3" + "#; + + lock.set_env(config); + + let actual = DAClientConfig::from_env().unwrap(); + assert_eq!( + actual, + expected_avail_da_layer_config( + "localhost:12345", + "localhost:54321", + "bottom drive obey lake curtain smoke basket hold race lonely fit walk", + "1".parse::<u32>().unwrap(), + "2".parse::<usize>().unwrap(), + "3".parse::<usize>().unwrap(), + ) + ); + } +} diff --git a/core/lib/env_config/src/da_dispatcher.rs b/core/lib/env_config/src/da_dispatcher.rs index 194e4185b286..246752db91ac 100644 --- a/core/lib/env_config/src/da_dispatcher.rs +++ b/core/lib/env_config/src/da_dispatcher.rs @@ -26,6 +26,7 @@ mod tests { polling_interval_ms: Some(interval), max_rows_to_dispatch: Some(rows_limit), max_retries: Some(max_retries), + use_dummy_inclusion_data: Some(true), } } @@ -36,6 +37,7 @@ mod tests { DA_DISPATCHER_POLLING_INTERVAL_MS=5000 DA_DISPATCHER_MAX_ROWS_TO_DISPATCH=60 DA_DISPATCHER_MAX_RETRIES=7 + DA_DISPATCHER_USE_DUMMY_INCLUSION_DATA="true" "#; lock.set_env(config); let actual = DADispatcherConfig::from_env().unwrap(); diff --git a/core/lib/env_config/src/fri_prover.rs b/core/lib/env_config/src/fri_prover.rs index 33698221dc92..6eb199c7e438 100644 --- a/core/lib/env_config/src/fri_prover.rs +++ b/core/lib/env_config/src/fri_prover.rs @@ -32,7 +32,7 @@ mod tests { fn expected_config() -> FriProverConfig { FriProverConfig { - setup_data_path: "vk_setup_data_generator_server_fri/data".to_string(), + setup_data_path: "prover/data/keys".to_string(), prometheus_port: 3315, max_attempts: 10, generation_timeout_in_secs: 300, @@ -68,7 +68,7 @@ mod tests { fn from_env() { let mut lock = MUTEX.lock(); let config = r#" - FRI_PROVER_SETUP_DATA_PATH="vk_setup_data_generator_server_fri/data" + FRI_PROVER_SETUP_DATA_PATH="prover/data/keys"
FRI_PROVER_PROMETHEUS_PORT="3315" FRI_PROVER_MAX_ATTEMPTS="10" FRI_PROVER_GENERATION_TIMEOUT_IN_SECS="300" diff --git a/core/lib/env_config/src/genesis.rs b/core/lib/env_config/src/genesis.rs index 1eb83ae2f39e..bf30fd4cc339 100644 --- a/core/lib/env_config/src/genesis.rs +++ b/core/lib/env_config/src/genesis.rs @@ -72,7 +72,7 @@ impl FromEnv for GenesisConfig { l1_chain_id: L1ChainId(network_config.network.chain_id().0), sl_chain_id: Some(network_config.network.chain_id()), l2_chain_id: network_config.zksync_network_id, - recursion_scheduler_level_vk_hash: contracts_config.snark_wrapper_vk_hash, + snark_wrapper_vk_hash: contracts_config.snark_wrapper_vk_hash, fee_account: state_keeper .fee_account_addr .context("Fee account required for genesis")?, diff --git a/core/lib/env_config/src/house_keeper.rs b/core/lib/env_config/src/house_keeper.rs index 25eeda793937..1a1ff4d27de2 100644 --- a/core/lib/env_config/src/house_keeper.rs +++ b/core/lib/env_config/src/house_keeper.rs @@ -18,21 +18,6 @@ mod tests { fn expected_config() -> HouseKeeperConfig { HouseKeeperConfig { l1_batch_metrics_reporting_interval_ms: 10_000, - gpu_prover_queue_reporting_interval_ms: 10_000, - prover_job_retrying_interval_ms: 10000, - prover_stats_reporting_interval_ms: 5_000, - witness_job_moving_interval_ms: 30_000, - witness_generator_stats_reporting_interval_ms: 10_000, - witness_generator_job_retrying_interval_ms: 30_000, - prover_db_pool_size: 2, - proof_compressor_job_retrying_interval_ms: 30_000, - proof_compressor_stats_reporting_interval_ms: 10_000, - prover_job_archiver_archiving_interval_ms: Some(1_800_000), - prover_job_archiver_archive_after_secs: Some(172_800), - // 24 hours - fri_gpu_prover_archiver_archiving_interval_ms: Some(86_400_000), - // 48 hours - fri_gpu_prover_archiver_archive_after_secs: Some(172_800), } } @@ -41,22 +26,6 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" HOUSE_KEEPER_L1_BATCH_METRICS_REPORTING_INTERVAL_MS="10000" - HOUSE_KEEPER_GPU_PROVER_QUEUE_REPORTING_INTERVAL_MS="10000" - HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS="10000" - HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS="30000" - HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS="10000" - HOUSE_KEEPER_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS="30000" - HOUSE_KEEPER_FRI_WITNESS_JOB_MOVING_INTERVAL_MS="40000" - HOUSE_KEEPER_FRI_PROVER_JOB_RETRYING_INTERVAL_MS="30000" - HOUSE_KEEPER_FRI_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS="30000" - HOUSE_KEEPER_PROVER_DB_POOL_SIZE="2" - HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS="5000" - HOUSE_KEEPER_PROOF_COMPRESSOR_STATS_REPORTING_INTERVAL_MS="10000" - HOUSE_KEEPER_PROOF_COMPRESSOR_JOB_RETRYING_INTERVAL_MS="30000" - HOUSE_KEEPER_PROVER_JOB_ARCHIVER_ARCHIVING_INTERVAL_MS="1800000" - HOUSE_KEEPER_PROVER_JOB_ARCHIVER_ARCHIVE_AFTER_SECS="172800" - HOUSE_KEEPER_FRI_GPU_PROVER_ARCHIVER_ARCHIVING_INTERVAL_MS="86400000" - HOUSE_KEEPER_FRI_GPU_PROVER_ARCHIVER_ARCHIVE_AFTER_SECS="172800" "#; lock.set_env(config); diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index 8cfa7b58a31c..b72c2c5d5b94 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -32,6 +32,8 @@ mod test_utils; mod vm_runner; mod wallets; +mod da_client; + pub trait FromEnv: Sized { fn from_env() -> anyhow::Result; } diff --git a/core/lib/env_config/src/vm_runner.rs b/core/lib/env_config/src/vm_runner.rs index efaf5d1666c3..730a79dd340a 100644 --- a/core/lib/env_config/src/vm_runner.rs +++ b/core/lib/env_config/src/vm_runner.rs @@ -65,7 +65,7 @@ 
mod tests { let config = ExperimentalVmConfig::from_env().unwrap(); assert_eq!(config.state_keeper_fast_vm_mode, FastVmMode::New); assert_eq!(config.playground.fast_vm_mode, FastVmMode::Shadow); - assert_eq!(config.playground.db_path, "/db/vm_playground"); + assert_eq!(config.playground.db_path.unwrap(), "/db/vm_playground"); assert_eq!(config.playground.first_processed_batch, L1BatchNumber(123)); assert!(config.playground.reset); @@ -83,6 +83,6 @@ mod tests { lock.remove_env(&["EXPERIMENTAL_VM_PLAYGROUND_DB_PATH"]); let config = ExperimentalVmConfig::from_env().unwrap(); - assert!(!config.playground.db_path.is_empty()); + assert!(config.playground.db_path.is_none()); } } diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index a245acdfacf6..5e76c10f53e7 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -16,7 +16,7 @@ zk_evm_1_4_1.workspace = true zk_evm_1_4_0.workspace = true zk_evm_1_3_3.workspace = true zk_evm_1_3_1.workspace = true -vm2.workspace = true +zksync_vm2.workspace = true circuit_sequencer_api_1_3_3.workspace = true circuit_sequencer_api_1_4_0.workspace = true @@ -40,6 +40,7 @@ tracing.workspace = true vise.workspace = true [dev-dependencies] +assert_matches.workspace = true tokio = { workspace = true, features = ["time"] } zksync_test_account.workspace = true ethabi.workspace = true diff --git a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs index 320917d3f4f0..3cb61b461a42 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs @@ -11,9 +11,9 @@ impl GlueFrom contracts_used: value.contracts_used, cycles_used: value.cycles_used, total_log_queries: value.logs.total_log_queries_count, + gas_remaining: value.gas_remaining, // There are no such fields in `m5`. gas_used: 0, - gas_remaining: 0, computational_gas_used: 0, pubdata_published: 0, circuit_statistic: Default::default(), @@ -37,10 +37,10 @@ impl GlueFrom contracts_used: value.contracts_used, cycles_used: value.cycles_used, computational_gas_used: value.computational_gas_used, + gas_remaining: value.gas_remaining, total_log_queries: value.logs.total_log_queries_count, // There are no such fields in `m6`. gas_used: 0, - gas_remaining: 0, pubdata_published: 0, circuit_statistic: Default::default(), }, @@ -63,10 +63,10 @@ impl GlueFrom contracts_used: value.contracts_used, cycles_used: value.cycles_used, computational_gas_used: value.computational_gas_used, + gas_remaining: value.gas_remaining, total_log_queries: value.logs.total_log_queries_count, // There are no such fields in `1_3_2`. 
gas_used: 0, - gas_remaining: 0, pubdata_published: 0, circuit_statistic: Default::default(), }, diff --git a/core/lib/multivm/src/tracers/mod.rs b/core/lib/multivm/src/tracers/mod.rs index 0a6517a6cd2f..35224d993a17 100644 --- a/core/lib/multivm/src/tracers/mod.rs +++ b/core/lib/multivm/src/tracers/mod.rs @@ -1,9 +1,6 @@ pub use self::{ - call_tracer::CallTracer, - multivm_dispatcher::TracerDispatcher, - prestate_tracer::PrestateTracer, - storage_invocation::StorageInvocations, - validator::{ValidationError, ValidationTracer, ValidationTracerParams}, + call_tracer::CallTracer, multivm_dispatcher::TracerDispatcher, prestate_tracer::PrestateTracer, + storage_invocation::StorageInvocations, validator::ValidationTracer, }; mod call_tracer; diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index a91006368b6a..a1573f24c668 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -10,12 +10,12 @@ use zksync_types::{ }; use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; -pub use crate::tracers::validator::types::{ValidationError, ValidationTracerParams}; +use self::types::{NewTrustedValidationItems, ValidationTracerMode}; use crate::{ glue::tracers::IntoOldVmTracer, - interface::storage::{StoragePtr, WriteStorage}, - tracers::validator::types::{ - NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule, + interface::{ + storage::{StoragePtr, WriteStorage}, + tracer::{ValidationParams, ViolatedValidationRule}, }, }; @@ -52,7 +52,7 @@ type ValidationRoundResult = Result<NewTrustedValidationItems, ViolatedValidationRule>; impl<H: HistoryMode> ValidationTracer<H> { pub fn new( - params: ValidationTracerParams, + params: ValidationParams, vm_version: VmVersion, ) -> (Self, Arc<OnceCell<ViolatedValidationRule>>) { let result = Arc::new(OnceCell::new()); @@ -181,8 +181,8 @@ impl ValidationTracer { } } - pub fn params(&self) -> ValidationTracerParams { - ValidationTracerParams { + pub fn params(&self) -> ValidationParams { + ValidationParams { user_address: self.user_address, paymaster_address: self.paymaster_address, trusted_slots: self.trusted_slots.clone(), diff --git a/core/lib/multivm/src/tracers/validator/types.rs b/core/lib/multivm/src/tracers/validator/types.rs index 418d2b893503..b9d442279927 100644 --- a/core/lib/multivm/src/tracers/validator/types.rs +++ b/core/lib/multivm/src/tracers/validator/types.rs @@ -1,9 +1,4 @@ -use std::{collections::HashSet, fmt, fmt::Display}; - -use zksync_types::{Address, H256, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::Halt; +use zksync_types::{Address, H256}; #[derive(Debug, Clone, Eq, PartialEq, Copy)] #[allow(clippy::enum_variant_names)] @@ -21,72 +16,3 @@ pub(super) struct NewTrustedValidationItems { pub(super) new_allowed_slots: Vec<H256>, pub(super) new_trusted_addresses: Vec<Address>, } - -#[derive(Debug, Clone)] -pub struct ValidationTracerParams { - pub user_address: Address, - pub paymaster_address: Address, - /// Slots that are trusted (i.e. the user can access them). - pub trusted_slots: HashSet<(Address, U256)>, - /// Trusted addresses (the user can access any slots on these addresses). - pub trusted_addresses: HashSet<Address>
, - /// Slots, that are trusted and the value of them is the new trusted address. - /// They are needed to work correctly with beacon proxy, where the address of the implementation is - /// stored in the beacon. - pub trusted_address_slots: HashSet<(Address, U256)>, - /// Number of computational gas that validation step is allowed to use. - pub computational_gas_limit: u32, -} - -#[derive(Debug, Clone)] -pub enum ViolatedValidationRule { - TouchedUnallowedStorageSlots(Address, U256), - CalledContractWithNoCode(Address), - TouchedUnallowedContext, - TookTooManyComputationalGas(u32), -} - -impl fmt::Display for ViolatedValidationRule { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ViolatedValidationRule::TouchedUnallowedStorageSlots(contract, key) => write!( - f, - "Touched unallowed storage slots: address {}, key: {}", - hex::encode(contract), - hex::encode(u256_to_h256(*key)) - ), - ViolatedValidationRule::CalledContractWithNoCode(contract) => { - write!(f, "Called contract with no code: {}", hex::encode(contract)) - } - ViolatedValidationRule::TouchedUnallowedContext => { - write!(f, "Touched unallowed context") - } - ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => { - write!( - f, - "Took too many computational gas, allowed limit: {}", - gas_limit - ) - } - } - } -} - -#[derive(Debug, Clone)] -pub enum ValidationError { - FailedTx(Halt), - ViolatedRule(ViolatedValidationRule), -} - -impl Display for ValidationError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::FailedTx(revert_reason) => { - write!(f, "Validation revert: {}", revert_reason) - } - Self::ViolatedRule(rule) => { - write!(f, "Violated validation rules: {}", rule) - } - } - } -} diff --git a/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs b/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs index 2beca41fb481..d1ddb2b44c80 100644 --- a/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_4_1::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
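The hunks around this point rename `TouchedUnallowed*` to `TouchedDisallowed*` and move `ViolatedValidationRule` from the tracer's private `types.rs` into `interface::tracer`. Reconstructed from the deleted definition above plus the renames, its post-move shape is presumably close to the sketch below; `Address`/`U256` are replaced with plain strings here so the example is self-contained (the real `Display` impl hex-encodes the address and key).

// Assumed post-move shape of the rule enum; simplified field types.
use std::fmt;

#[derive(Debug, Clone)]
pub enum ViolatedValidationRule {
    TouchedDisallowedStorageSlots(String, String), // contract address, storage key
    CalledContractWithNoCode(String),
    TouchedDisallowedContext,
    TookTooManyComputationalGas(u32),
}

impl fmt::Display for ViolatedValidationRule {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::TouchedDisallowedStorageSlots(contract, key) => {
                write!(f, "Touched disallowed storage slots: address {contract}, key: {key}")
            }
            Self::CalledContractWithNoCode(contract) => {
                write!(f, "Called contract with no code: {contract}")
            }
            Self::TouchedDisallowedContext => write!(f, "Touched disallowed context"),
            Self::TookTooManyComputationalGas(limit) => {
                write!(f, "Took too many computational gas, allowed limit: {limit}")
            }
        }
    }
}

fn main() {
    let rule = ViolatedValidationRule::TookTooManyComputationalGas(300_000);
    assert_eq!(rule.to_string(), "Took too many computational gas, allowed limit: 300000");
}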
@@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs b/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs index 3394a6c3f2b5..a51644ff9ea2 100644 --- a/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_4_1::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. @@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs b/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs index 53b5bf04d2e7..7f9767a5e632 100644 --- a/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_4_0::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
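Each per-version tracer guards storage reads with `is_allowed_storage_read` before raising `TouchedDisallowedStorageSlots`. The sketch below illustrates the whitelist logic implied by the `ValidationParams` fields deleted earlier (trusted slots and trusted addresses); it is a simplification under assumed semantics, and the real check also covers beacon-proxy style `trusted_address_slots` and reads tied to `msg_sender`.

// Simplified whitelist for storage reads during validation.
use std::collections::HashSet;

type Address = [u8; 20];
type Slot = (Address, u64); // the real key type is U256

struct ValidationParams {
    user_address: Address,
    trusted_slots: HashSet<Slot>,
    trusted_addresses: HashSet<Address>,
}

impl ValidationParams {
    fn is_allowed_storage_read(&self, address: Address, key: u64) -> bool {
        // Reads of the user's own account, fully trusted addresses,
        // or individually trusted (address, key) slots are allowed.
        address == self.user_address
            || self.trusted_addresses.contains(&address)
            || self.trusted_slots.contains(&(address, key))
    }
}

fn main() {
    let user = [1; 20];
    let params = ValidationParams {
        user_address: user,
        trusted_slots: HashSet::from([([2; 20], 7)]),
        trusted_addresses: HashSet::new(),
    };
    assert!(params.is_allowed_storage_read(user, 0));
    assert!(params.is_allowed_storage_read([2; 20], 7));
    assert!(!params.is_allowed_storage_read([3; 20], 0));
}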
@@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs index e963c79f4e41..c206bd6fb2ad 100644 --- a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_5_0::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -86,7 +86,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. @@ -100,7 +100,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs index 6107125d14d0..0badd7c58775 100644 --- a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_3_3::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
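As seen in the `validator/mod.rs` hunk earlier, `ValidationTracer::new` returns the tracer together with an `Arc`'d once-cell (`let result = Arc::new(OnceCell::new());`), through which callers observe the first recorded violation after the run. A sketch of that pattern, with `std::sync::OnceLock` standing in for the `once_cell` type used upstream and illustrative names throughout:

// "Report the first violation exactly once": the constructor hands out a shared
// cell, the tracer sets it at most once, callers read it after execution.
use std::sync::{Arc, OnceLock};

#[derive(Debug, PartialEq)]
enum Violation {
    TouchedDisallowedContext,
}

struct Tracer {
    result: Arc<OnceLock<Violation>>,
}

impl Tracer {
    fn new() -> (Self, Arc<OnceLock<Violation>>) {
        let result = Arc::new(OnceLock::new());
        (Self { result: result.clone() }, result)
    }

    fn record(&self, v: Violation) {
        // Later violations are ignored; only the first one is kept.
        let _ = self.result.set(v);
    }
}

fn main() {
    let (tracer, handle) = Tracer::new();
    tracer.record(Violation::TouchedDisallowedContext);
    assert_eq!(handle.get(), Some(&Violation::TouchedDisallowedContext));
}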
@@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs index bb166bedcdad..86a639915c9d 100644 --- a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs @@ -9,12 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, + tracer::ViolatedValidationRule, VmExecutionResultAndLogs, }, tracers::{ dynamic::vm_1_3_3::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -87,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. @@ -101,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs index 6af546318af4..871258f43b85 100644 --- a/core/lib/multivm/src/versions/shadow.rs +++ b/core/lib/multivm/src/versions/shadow.rs @@ -9,10 +9,9 @@ use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transact use crate::{ interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, - BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_fast, }; @@ -52,18 +51,6 @@ where self.main.push_transaction(tx); } - fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { - let main_result = self.main.execute(execution_mode); - let shadow_result = self.shadow.execute(execution_mode); - let mut errors = DivergenceErrors::default(); - errors.check_results_match(&main_result, &shadow_result); - errors - .into_result() - .with_context(|| format!("executing VM with mode {execution_mode:?}")) - .unwrap(); - main_result - } - fn inspect( &mut self, dispatcher: Self::TracerDispatcher, @@ -80,73 +67,17 @@ where main_result } - fn get_bootloader_memory(&self) -> BootloaderMemory { - let main_memory = self.main.get_bootloader_memory(); - let shadow_memory = self.shadow.get_bootloader_memory(); - 
DivergenceErrors::single("get_bootloader_memory", &main_memory, &shadow_memory).unwrap(); - main_memory - } - - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - let main_bytecodes = self.main.get_last_tx_compressed_bytecodes(); - let shadow_bytecodes = self.shadow.get_last_tx_compressed_bytecodes(); - DivergenceErrors::single( - "get_last_tx_compressed_bytecodes", - &main_bytecodes, - &shadow_bytecodes, - ) - .unwrap(); - main_bytecodes - } - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { self.shadow.start_new_l2_block(l2_block_env); self.main.start_new_l2_block(l2_block_env); } - fn get_current_execution_state(&self) -> CurrentExecutionState { - let main_state = self.main.get_current_execution_state(); - let shadow_state = self.shadow.get_current_execution_state(); - DivergenceErrors::single("get_current_execution_state", &main_state, &shadow_state) - .unwrap(); - main_state - } - - fn execute_transaction_with_bytecode_compression( - &mut self, - tx: Transaction, - with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { - let tx_hash = tx.hash(); - let main_result = self - .main - .execute_transaction_with_bytecode_compression(tx.clone(), with_compression); - let shadow_result = self - .shadow - .execute_transaction_with_bytecode_compression(tx, with_compression); - let mut errors = DivergenceErrors::default(); - errors.check_results_match(&main_result.1, &shadow_result.1); - errors - .into_result() - .with_context(|| { - format!("executing transaction {tx_hash:?}, with_compression={with_compression:?}") - }) - .unwrap(); - main_result - } - fn inspect_transaction_with_bytecode_compression( &mut self, tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { let tx_hash = tx.hash(); let main_result = self.main.inspect_transaction_with_bytecode_compression( tracer, @@ -171,13 +102,6 @@ where self.main.record_vm_memory_metrics() } - fn gas_remaining(&self) -> u32 { - let main_gas = self.main.gas_remaining(); - let shadow_gas = self.shadow.gas_remaining(); - DivergenceErrors::single("gas_remaining", &main_gas, &shadow_gas).unwrap(); - main_gas - } - fn finish_batch(&mut self) -> FinishedL1Batch { let main_batch = self.main.finish_batch(); let shadow_batch = self.shadow.finish_batch(); @@ -216,16 +140,6 @@ where pub struct DivergenceErrors(Vec); impl DivergenceErrors { - fn single( - context: &str, - main: &T, - shadow: &T, - ) -> anyhow::Result<()> { - let mut this = Self::default(); - this.check_match(context, main, shadow); - this.into_result() - } - fn check_results_match( &mut self, main_result: &VmExecutionResultAndLogs, @@ -251,6 +165,16 @@ impl DivergenceErrors { let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); self.check_match("logs.storage_logs", &main_logs, &shadow_logs); self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); + self.check_match( + "statistics.circuit_statistic", + &main_result.statistics.circuit_statistic, + &shadow_result.statistics.circuit_statistic, + ); + self.check_match( + "gas_remaining", + &main_result.statistics.gas_remaining, + &shadow_result.statistics.gas_remaining, + ); } fn check_match(&mut self, context: &str, main: &T, shadow: &T) { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs 
b/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs index ed17ffc4c39b..59ccbd584e77 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs @@ -167,6 +167,8 @@ impl VmRevertReasonParsingResult { mod tests { use std::convert::TryFrom; + use assert_matches::assert_matches; + use super::VmRevertReason; #[test] @@ -202,7 +204,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; let reason = VmRevertReason::try_from(msg.as_slice()).expect("Shouldn't be error"); - assert!(matches!(reason, VmRevertReason::Unknown { .. })); + assert_matches!(reason, VmRevertReason::Unknown { .. }); } #[test] diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index f86beb2d400d..8068e4847b83 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -1,32 +1,25 @@ use std::collections::HashSet; -use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; -use zksync_types::{ - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Transaction, -}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; +use zksync_types::Transaction; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, + L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, + VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, utils::bytecode, - vm_1_3_2::{events::merge_events, VmInstance}, + vm_1_3_2::VmInstance, }; #[derive(Debug)] pub struct Vm { pub(crate) vm: VmInstance, pub(crate) system_env: SystemEnv, - pub(crate) batch_env: L1BatchEnv, - pub(crate) last_tx_compressed_bytecodes: Vec, } impl VmInterface for Vm { @@ -81,83 +74,23 @@ impl VmInterface for Vm { } } - fn get_bootloader_memory(&self) -> BootloaderMemory { - vec![] - } - - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.last_tx_compressed_bytecodes.clone() - } - fn start_new_l2_block(&mut self, _l2_block_env: L2BlockEnv) { // Do nothing, because vm 1.3.2 doesn't support L2 blocks } - fn get_current_execution_state(&self) -> CurrentExecutionState { - let (raw_events, l1_messages) = self.vm.state.event_sink.flatten(); - let events = merge_events(raw_events) - .into_iter() - .map(|e| e.into_vm_event(self.batch_env.number)) - .collect(); - let l2_to_l1_logs = l1_messages - .into_iter() - .map(|m| { - UserL2ToL1Log(L2ToL1Log { - shard_id: m.shard_id, - is_service: m.is_first, - tx_number_in_block: m.tx_number_in_block, - sender: m.address, - key: u256_to_h256(m.key), - value: u256_to_h256(m.value), - }) - }) - .collect(); - - let used_contract_hashes = self - .vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .keys() - .cloned() - .collect(); - - let storage_log_queries = self.vm.state.storage.get_final_log_queries(); - - let deduped_storage_log_queries = - sort_storage_access_queries(storage_log_queries.iter().map(|log| &log.log_query)).1; - - CurrentExecutionState { - 
events, - deduplicated_storage_logs: deduped_storage_log_queries - .into_iter() - .map(GlueInto::glue_into) - .collect(), - used_contract_hashes, - user_l2_to_l1_logs: l2_to_l1_logs, - system_logs: vec![], - // Fields below are not produced by VM 1.3.2 - storage_refunds: vec![], - pubdata_costs: Vec::new(), - } - } - fn inspect_transaction_with_bytecode_compression( &mut self, tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { if let Some(storage_invocations) = tracer.storage_invocations { self.vm .execution_mode .set_invocation_limit(storage_invocations); } - self.last_tx_compressed_bytecodes = vec![]; + + let compressed_bytecodes: Vec<_>; let bytecodes = if with_compression { let deps = &tx.execute.factory_deps; let mut deps_hashes = HashSet::with_capacity(deps.len()); @@ -174,18 +107,17 @@ impl VmInterface for Vm { bytecode::compress(bytecode.clone()).ok() } }); - let compressed_bytecodes: Vec<_> = filtered_deps.collect(); + compressed_bytecodes = filtered_deps.collect(); - self.last_tx_compressed_bytecodes - .clone_from(&compressed_bytecodes); crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, self.system_env.execution_mode.glue_into(), - Some(compressed_bytecodes), + Some(compressed_bytecodes.clone()), ); bytecode_hashes } else { + compressed_bytecodes = vec![]; crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, @@ -224,7 +156,7 @@ impl VmInterface for Vm { result, ) } else { - (Ok(()), result) + (Ok(compressed_bytecodes.into()), result) } } @@ -245,10 +177,6 @@ impl VmInterface for Vm { } } - fn gas_remaining(&self) -> u32 { - self.vm.gas_remaining() - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( @@ -270,7 +198,7 @@ impl VmFactory for Vm { let inner_vm: VmInstance = crate::vm_1_3_2::vm_with_bootloader::init_vm_with_gas_limit( oracle_tools, - batch_env.clone().glue_into(), + batch_env.glue_into(), block_properties, system_env.execution_mode.glue_into(), &system_env.base_system_smart_contracts.clone().glue_into(), @@ -279,8 +207,6 @@ impl VmFactory for Vm { Self { vm: inner_vm, system_env, - batch_env, - last_tx_compressed_bytecodes: vec![], } } } diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs index b82282f0a567..de3bb2c22d77 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs @@ -142,6 +142,7 @@ pub struct VmPartialExecutionResult { pub contracts_used: usize, pub cycles_used: u32, pub computational_gas_used: u32, + pub gas_remaining: u32, } #[derive(Debug, Clone, PartialEq)] @@ -660,6 +661,7 @@ impl VmInstance { cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, computational_gas_used, + gas_remaining: self.gas_remaining(), }, call_traces: tx_tracer.call_traces(), }) @@ -762,6 +764,7 @@ impl VmInstance { .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, computational_gas_used, + gas_remaining: self.gas_remaining(), }; // Collecting `block_tip_result` needs logs with timestamp, so we drain events for the `full_result` @@ -810,6 +813,7 @@ impl VmInstance { contracts_used: 0, cycles_used: 0, computational_gas_used: 0, + 
gas_remaining: 0, }, } } else { @@ -863,6 +867,7 @@ impl VmInstance { .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, computational_gas_used, + gas_remaining: self.gas_remaining(), } } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs index 22d7b2814cf6..241054ae0345 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs index 6e0e31d461de..5f24f2465a32 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs @@ -5,7 +5,7 @@ use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - CompressedBytecodeInfo, VmInterface, + CompressedBytecodeInfo, }, utils::bytecode, vm_1_4_1::Vm, @@ -15,15 +15,18 @@ use crate::{ impl Vm { /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { - self.get_last_tx_compressed_bytecodes().iter().any(|info| { - !self - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) - }) + self.bootloader_state + .get_last_tx_compressed_bytecodes() + .iter() + .any(|info| { + !self + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(&info.original)) + }) } } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs index 01ee21f1836f..db5aaa783df5 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs @@ -4,7 +4,7 @@ use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, VmExecutionStopReason}, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmExecutionMode, VmExecutionResultAndLogs, }, vm_1_4_1::{ old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/gas.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/gas.rs index bd30aa6218b1..908c9466e895 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/gas.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/gas.rs @@ -1,5 +1,5 @@ use crate::{ - interface::{storage::WriteStorage, VmInterface}, + interface::storage::WriteStorage, vm_1_4_1::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs index f319964efb51..47e047ebbf72 100644 --- 
a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs @@ -47,10 +47,10 @@ fn test_bootloader_out_of_gas() { let res = vm.vm.execute(VmExecutionMode::Batch); - assert!(matches!( + assert_matches!( res.result, ExecutionResult::Halt { reason: Halt::BootloaderOutOfGas } - )); + ); } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs index 745f5ab378de..384bc4cf325e 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs @@ -28,7 +28,7 @@ fn estimate_fee() { vm_tester.vm.push_transaction(tx); let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert!(matches!(result.result, ExecutionResult::Success { .. })); + assert_matches!(result.result, ExecutionResult::Success { .. }); } #[test] @@ -71,11 +71,11 @@ fn simple_execute() { vm.push_transaction(tx2); vm.push_transaction(tx3); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Revert { .. })); + assert_matches!(tx.result, ExecutionResult::Revert { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let block_tip = vm.execute(VmExecutionMode::Batch); - assert!(matches!(block_tip.result, ExecutionResult::Success { .. })); + assert_matches!(block_tip.result, ExecutionResult::Success { .. }); } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 8f20e8654d77..2c1a4ba5e36b 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -8,7 +8,7 @@ use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, @@ -38,40 +38,11 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { - type TracerDispatcher = TracerDispatcher; - - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true); - } - - /// Execute VM with custom tracers. - fn inspect( - &mut self, - tracer: Self::TracerDispatcher, - execution_mode: VmExecutionMode, - ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) - } - - /// Get current state of bootloader memory. 
- fn get_bootloader_memory(&self) -> BootloaderMemory { - self.bootloader_state.bootloader_memory() - } - - /// Get compressed bytecodes of the last executed transaction - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.bootloader_state.get_last_tx_compressed_bytecodes() - } - - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - self.bootloader_state.start_new_l2_block(l2_block_env); +impl Vm { + pub(super) fn gas_remaining(&self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining } - /// Get current state of virtual machine. - /// This method should be used only after the batch execution. - /// Otherwise it can panic. fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) @@ -106,19 +77,35 @@ impl VmInterface for Vm { pubdata_costs: Vec::new(), } } +} - /// Execute transaction with optional bytecode compression. +impl VmInterface for Vm { + type TracerDispatcher = TracerDispatcher; + + /// Push tx into memory for the future execution + fn push_transaction(&mut self, tx: Transaction) { + self.push_transaction_with_compression(tx, true); + } + + /// Execute VM with custom tracers. + fn inspect( + &mut self, + tracer: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode, None) + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.bootloader_state.start_new_l2_block(l2_block_env); + } - /// Inspect transaction with optional bytecode compression. fn inspect_transaction_with_bytecode_compression( &mut self, tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { @@ -127,7 +114,13 @@ impl VmInterface for Vm { result, ) } else { - (Ok(()), result) + ( + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), + result, + ) } } @@ -135,14 +128,10 @@ impl VmInterface for Vm { self.record_vm_memory_metrics_inner() } - fn gas_remaining(&self) -> u32 { - self.state.local_state.callstack.current.ergs_remaining - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.execute(VmExecutionMode::Batch); + let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.get_bootloader_memory(); + let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs index e692c8a2640d..c0d94bd685c4 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - 
tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs index 54e69289521f..1033fff90e46 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs @@ -5,7 +5,7 @@ use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - CompressedBytecodeInfo, VmInterface, + CompressedBytecodeInfo, }, utils::bytecode, vm_1_4_2::Vm, @@ -15,15 +15,18 @@ use crate::{ impl Vm { /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { - self.get_last_tx_compressed_bytecodes().iter().any(|info| { - !self - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) - }) + self.bootloader_state + .get_last_tx_compressed_bytecodes() + .iter() + .any(|info| { + !self + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(&info.original)) + }) } } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs index a04e071fe436..d42d18809331 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs @@ -4,7 +4,7 @@ use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, VmExecutionStopReason}, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmExecutionMode, VmExecutionResultAndLogs, }, vm_1_4_2::{ old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/gas.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/gas.rs index d5b74de94554..e560acc1cf7f 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/gas.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/gas.rs @@ -1,5 +1,5 @@ use crate::{ - interface::{storage::WriteStorage, VmInterface}, + interface::storage::WriteStorage, vm_1_4_2::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs index 35d1666f10b9..8d69d05c4444 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs @@ -46,10 +46,10 @@ fn test_bootloader_out_of_gas() { let res = vm.vm.execute(VmExecutionMode::Batch); - assert!(matches!( + assert_matches!( res.result, ExecutionResult::Halt { reason: Halt::BootloaderOutOfGas } - )); + ); } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs index 0876dcf01a90..57b37e67b769 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs @@ -25,7 +25,7 @@ fn estimate_fee() { vm_tester.vm.push_transaction(tx); let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert!(matches!(result.result, ExecutionResult::Success { .. 
})); + assert_matches!(result.result, ExecutionResult::Success { .. }); } #[test] @@ -68,11 +68,11 @@ fn simple_execute() { vm.push_transaction(tx2); vm.push_transaction(tx3); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Revert { .. })); + assert_matches!(tx.result, ExecutionResult::Revert { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let block_tip = vm.execute(VmExecutionMode::Batch); - assert!(matches!(block_tip.result, ExecutionResult::Success { .. })); + assert_matches!(block_tip.result, ExecutionResult::Success { .. }); } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index e612885086dc..71633dd3fca3 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -8,7 +8,7 @@ use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, @@ -38,40 +38,11 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { - type TracerDispatcher = TracerDispatcher; - - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true); - } - - /// Execute VM with custom tracers. - fn inspect( - &mut self, - tracer: Self::TracerDispatcher, - execution_mode: VmExecutionMode, - ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) - } - - /// Get current state of bootloader memory. - fn get_bootloader_memory(&self) -> BootloaderMemory { - self.bootloader_state.bootloader_memory() - } - - /// Get compressed bytecodes of the last executed transaction - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.bootloader_state.get_last_tx_compressed_bytecodes() - } - - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - self.bootloader_state.start_new_l2_block(l2_block_env); +impl Vm { + pub(super) fn gas_remaining(&self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining } - /// Get current state of virtual machine. - /// This method should be used only after the batch execution. - /// Otherwise it can panic. fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) @@ -106,19 +77,35 @@ impl VmInterface for Vm { pubdata_costs: Vec::new(), } } +} - /// Execute transaction with optional bytecode compression. +impl VmInterface for Vm { + type TracerDispatcher = TracerDispatcher; + + /// Push tx into memory for the future execution + fn push_transaction(&mut self, tx: Transaction) { + self.push_transaction_with_compression(tx, true); + } + + /// Execute VM with custom tracers. 
+ fn inspect( + &mut self, + tracer: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode, None) + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.bootloader_state.start_new_l2_block(l2_block_env); + } - /// Inspect transaction with optional bytecode compression. fn inspect_transaction_with_bytecode_compression( &mut self, tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { @@ -127,7 +114,13 @@ impl VmInterface for Vm { result, ) } else { - (Ok(()), result) + ( + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), + result, + ) } } @@ -135,14 +128,10 @@ impl VmInterface for Vm { self.record_vm_memory_metrics_inner() } - fn gas_remaining(&self) -> u32 { - self.state.local_state.callstack.current.ergs_remaining - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.execute(VmExecutionMode::Batch); + let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.get_bootloader_memory(); + let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs index 8a605978a1ed..830fe482320b 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs index b7e702b7a957..2d6f081a1886 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs @@ -5,7 +5,7 @@ use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - CompressedBytecodeInfo, VmInterface, + CompressedBytecodeInfo, }, utils::bytecode, vm_boojum_integration::Vm, @@ -15,15 +15,18 @@ use crate::{ impl Vm { /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. 
pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { - self.get_last_tx_compressed_bytecodes().iter().any(|info| { - !self - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) - }) + self.bootloader_state + .get_last_tx_compressed_bytecodes() + .iter() + .any(|info| { + !self + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(&info.original)) + }) } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs index 664cb90531e4..a7c790a4bc30 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs @@ -4,7 +4,7 @@ use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, VmExecutionStopReason}, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmExecutionMode, VmExecutionResultAndLogs, }, vm_boojum_integration::{ old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/gas.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/gas.rs index b31e4c3536bc..eb69f3552233 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/gas.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/gas.rs @@ -1,5 +1,5 @@ use crate::{ - interface::{storage::WriteStorage, VmInterface}, + interface::storage::WriteStorage, vm_boojum_integration::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs index 0ee3b811b4ca..57229abb0978 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs @@ -47,10 +47,10 @@ fn test_bootloader_out_of_gas() { let res = vm.vm.execute(VmExecutionMode::Batch); - assert!(matches!( + assert_matches!( res.result, ExecutionResult::Halt { reason: Halt::BootloaderOutOfGas } - )); + ); } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs index fc94e2c71526..f6b1d83e02a3 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs @@ -28,7 +28,7 @@ fn estimate_fee() { vm_tester.vm.push_transaction(tx); let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert!(matches!(result.result, ExecutionResult::Success { .. })); + assert_matches!(result.result, ExecutionResult::Success { .. }); } #[test] @@ -71,11 +71,11 @@ fn simple_execute() { vm.push_transaction(tx2); vm.push_transaction(tx3); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Revert { .. })); + assert_matches!(tx.result, ExecutionResult::Revert { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. 
})); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let block_tip = vm.execute(VmExecutionMode::Batch); - assert!(matches!(block_tip.result, ExecutionResult::Success { .. })); + assert_matches!(block_tip.result, ExecutionResult::Success { .. }); } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 0a9e12865078..c7b4a5537acb 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -8,7 +8,7 @@ use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, @@ -38,40 +38,11 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { - type TracerDispatcher = TracerDispatcher; - - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true); - } - - /// Execute VM with custom tracers. - fn inspect( - &mut self, - tracer: Self::TracerDispatcher, - execution_mode: VmExecutionMode, - ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) - } - - /// Get current state of bootloader memory. - fn get_bootloader_memory(&self) -> BootloaderMemory { - self.bootloader_state.bootloader_memory() - } - - /// Get compressed bytecodes of the last executed transaction - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.bootloader_state.get_last_tx_compressed_bytecodes() - } - - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - self.bootloader_state.start_new_l2_block(l2_block_env); +impl Vm { + pub(super) fn gas_remaining(&self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining } - /// Get current state of virtual machine. - /// This method should be used only after the batch execution. - /// Otherwise it can panic. fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) @@ -106,8 +77,28 @@ impl VmInterface for Vm { pubdata_costs: Vec::new(), } } +} - /// Execute transaction with optional bytecode compression. +impl VmInterface for Vm { + type TracerDispatcher = TracerDispatcher; + + /// Push tx into memory for the future execution + fn push_transaction(&mut self, tx: Transaction) { + self.push_transaction_with_compression(tx, true); + } + + /// Execute VM with custom tracers. + fn inspect( + &mut self, + tracer: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode) + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.bootloader_state.start_new_l2_block(l2_block_env); + } /// Inspect transaction with optional bytecode compression. 
fn inspect_transaction_with_bytecode_compression( @@ -115,10 +106,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx); if self.has_unpublished_bytecodes() { @@ -127,7 +115,13 @@ impl VmInterface for Vm { result, ) } else { - (Ok(()), result) + ( + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), + result, + ) } } @@ -135,14 +129,10 @@ impl VmInterface for Vm { self.record_vm_memory_metrics_inner() } - fn gas_remaining(&self) -> u32 { - self.state.local_state.callstack.current.ergs_remaining - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.execute(VmExecutionMode::Batch); + let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.get_bootloader_memory(); + let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs index ce37636d2cda..15b4daf02a77 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs @@ -189,11 +189,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs new file mode 100644 index 000000000000..de6ead71e655 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -0,0 +1,157 @@ +use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; +use zksync_vm2::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm_interface::CircuitStatistic; + +use crate::vm_latest::tracers::circuits_capacity::*; + +#[derive(Debug, Default, Clone, PartialEq)] +pub(crate) struct CircuitsTracer { + main_vm_cycles: u32, + ram_permutation_cycles: u32, + storage_application_cycles: u32, + storage_sorter_cycles: u32, + code_decommitter_cycles: u32, + code_decommitter_sorter_cycles: u32, + log_demuxer_cycles: u32, + events_sorter_cycles: u32, + keccak256_cycles: u32, + ecrecover_cycles: u32, + sha256_cycles: u32, + secp256k1_verify_cycles: u32, + transient_storage_checker_cycles: u32, +} + +impl Tracer for CircuitsTracer { + fn after_instruction(&mut self, _state: &mut S) { + self.main_vm_cycles += 1; + + match OP::VALUE { + Opcode::Nop + | Opcode::Add + | Opcode::Sub + | Opcode::Mul + | Opcode::Div + | Opcode::Jump + | Opcode::Xor + | Opcode::And + | Opcode::Or + | Opcode::ShiftLeft + | Opcode::ShiftRight + | Opcode::RotateLeft + | Opcode::RotateRight + | Opcode::PointerAdd + | Opcode::PointerSub + | Opcode::PointerPack + | Opcode::PointerShrink => { + 
self.ram_permutation_cycles += RICH_ADDRESSING_OPCODE_RAM_CYCLES; + } + Opcode::This + | Opcode::Caller + | Opcode::CodeAddress + | Opcode::ContextMeta + | Opcode::ErgsLeft + | Opcode::SP + | Opcode::ContextU128 + | Opcode::SetContextU128 + | Opcode::AuxMutating0 + | Opcode::IncrementTxNumber + | Opcode::Ret(_) + | Opcode::NearCall => { + self.ram_permutation_cycles += AVERAGE_OPCODE_RAM_CYCLES; + } + Opcode::StorageRead => { + self.ram_permutation_cycles += STORAGE_READ_RAM_CYCLES; + self.log_demuxer_cycles += STORAGE_READ_LOG_DEMUXER_CYCLES; + self.storage_sorter_cycles += STORAGE_READ_STORAGE_SORTER_CYCLES; + } + Opcode::TransientStorageRead => { + self.ram_permutation_cycles += TRANSIENT_STORAGE_READ_RAM_CYCLES; + self.log_demuxer_cycles += TRANSIENT_STORAGE_READ_LOG_DEMUXER_CYCLES; + self.transient_storage_checker_cycles += + TRANSIENT_STORAGE_READ_TRANSIENT_STORAGE_CHECKER_CYCLES; + } + Opcode::StorageWrite => { + self.ram_permutation_cycles += STORAGE_WRITE_RAM_CYCLES; + self.log_demuxer_cycles += STORAGE_WRITE_LOG_DEMUXER_CYCLES; + self.storage_sorter_cycles += STORAGE_WRITE_STORAGE_SORTER_CYCLES; + } + Opcode::TransientStorageWrite => { + self.ram_permutation_cycles += TRANSIENT_STORAGE_WRITE_RAM_CYCLES; + self.log_demuxer_cycles += TRANSIENT_STORAGE_WRITE_LOG_DEMUXER_CYCLES; + self.transient_storage_checker_cycles += + TRANSIENT_STORAGE_WRITE_TRANSIENT_STORAGE_CHECKER_CYCLES; + } + Opcode::L2ToL1Message | Opcode::Event => { + self.ram_permutation_cycles += EVENT_RAM_CYCLES; + self.log_demuxer_cycles += EVENT_LOG_DEMUXER_CYCLES; + self.events_sorter_cycles += EVENT_EVENTS_SORTER_CYCLES; + } + Opcode::PrecompileCall => { + self.ram_permutation_cycles += PRECOMPILE_RAM_CYCLES; + self.log_demuxer_cycles += PRECOMPILE_LOG_DEMUXER_CYCLES; + } + Opcode::Decommit => { + // Note that for decommit, the log demuxer circuit is not used.
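// (The code decommitter circuit itself is charged via
// `on_extra_prover_cycles(CycleStats::Decommit(_))` below; this arm only adds
// the RAM permutation and decommitter sorter costs.)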
+ self.ram_permutation_cycles += LOG_DECOMMIT_RAM_CYCLES; + self.code_decommitter_sorter_cycles += LOG_DECOMMIT_DECOMMITTER_SORTER_CYCLES; + } + Opcode::FarCall(_) => { + self.ram_permutation_cycles += FAR_CALL_RAM_CYCLES; + self.code_decommitter_sorter_cycles += FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES; + self.storage_sorter_cycles += FAR_CALL_STORAGE_SORTER_CYCLES; + self.log_demuxer_cycles += FAR_CALL_LOG_DEMUXER_CYCLES; + } + Opcode::AuxHeapWrite | Opcode::HeapWrite /* StaticMemoryWrite */ => { + self.ram_permutation_cycles += UMA_WRITE_RAM_CYCLES; + } + Opcode::AuxHeapRead | Opcode::HeapRead | Opcode::PointerRead /* StaticMemoryRead */ => { + self.ram_permutation_cycles += UMA_READ_RAM_CYCLES; + } + } + } + + fn on_extra_prover_cycles(&mut self, stats: CycleStats) { + match stats { + CycleStats::Keccak256(cycles) => self.keccak256_cycles += cycles, + CycleStats::Sha256(cycles) => self.sha256_cycles += cycles, + CycleStats::EcRecover(cycles) => self.ecrecover_cycles += cycles, + CycleStats::Secp256k1Verify(cycles) => self.secp256k1_verify_cycles += cycles, + CycleStats::Decommit(cycles) => self.code_decommitter_cycles += cycles, + CycleStats::StorageRead => self.storage_application_cycles += 1, + CycleStats::StorageWrite => self.storage_application_cycles += 2, + } + } +} + +impl CircuitsTracer { + pub(crate) fn circuit_statistic(&self) -> CircuitStatistic { + CircuitStatistic { + main_vm: self.main_vm_cycles as f32 / GEOMETRY_CONFIG.cycles_per_vm_snapshot as f32, + ram_permutation: self.ram_permutation_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_ram_permutation as f32, + storage_application: self.storage_application_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_storage_application as f32, + storage_sorter: self.storage_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_storage_sorter as f32, + code_decommitter: self.code_decommitter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_code_decommitter as f32, + code_decommitter_sorter: self.code_decommitter_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_code_decommitter_sorter as f32, + log_demuxer: self.log_demuxer_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_log_demuxer as f32, + events_sorter: self.events_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_events_or_l1_messages_sorter as f32, + keccak256: self.keccak256_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_keccak256_circuit as f32, + ecrecover: self.ecrecover_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, + sha256: self.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, + secp256k1_verify: self.secp256k1_verify_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_secp256r1_verify_circuit as f32, + transient_storage_checker: self.transient_storage_checker_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_transient_storage_sorter as f32, + } + } +} + +const GEOMETRY_CONFIG: GeometryConfig = get_geometry_config(); diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs index 798a1e12bdd8..2312c3d97b40 100644 --- a/core/lib/multivm/src/versions/vm_fast/events.rs +++ b/core/lib/multivm/src/versions/vm_fast/events.rs @@ -1,6 +1,6 @@ -use vm2::Event; use zksync_types::{L1BatchNumber, H256}; use zksync_utils::h256_to_account_address; +use zksync_vm2::Event; use crate::interface::VmEvent; diff --git a/core/lib/multivm/src/versions/vm_fast/glue.rs b/core/lib/multivm/src/versions/vm_fast/glue.rs index cbf22f9122b0..f24c82af11e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/glue.rs +++ 
b/core/lib/multivm/src/versions/vm_fast/glue.rs @@ -3,9 +3,9 @@ use zksync_utils::u256_to_h256; use crate::glue::GlueFrom; -impl GlueFrom<&vm2::L2ToL1Log> for SystemL2ToL1Log { - fn glue_from(value: &vm2::L2ToL1Log) -> Self { - let vm2::L2ToL1Log { +impl GlueFrom<&zksync_vm2::L2ToL1Log> for SystemL2ToL1Log { + fn glue_from(value: &zksync_vm2::L2ToL1Log) -> Self { + let zksync_vm2::L2ToL1Log { key, value, is_service, diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs index 4deb6b9dbf74..f0d8bafe69ec 100644 --- a/core/lib/multivm/src/versions/vm_fast/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -2,6 +2,7 @@ pub use self::vm::Vm; mod bootloader_state; mod bytecode; +mod circuits_tracer; mod events; mod glue; mod hook; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index 239d40947a67..15af9d868adc 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -10,22 +10,18 @@ use zksync_types::{ commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, }; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::versions::vm_fast::tests::tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTesterBuilder, +use super::{ + tester::{default_l1_batch, get_empty_storage, VmTesterBuilder}, + utils::{get_complex_upgrade_abi, read_complex_upgrade}, }; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tracers::PubdataTracer, - L1BatchEnv, TracerDispatcher, + interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + vm_latest::constants::{ + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, + MAX_VM_PUBDATA_PER_BATCH, }, }; @@ -130,7 +126,6 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute // the gas limit - let batch_env = L1BatchEnv { fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), ..default_l1_batch(zksync_types::L1BatchNumber(1)) @@ -143,15 +138,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { .with_l1_batch_env(batch_env) .build(); - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); + let bytecodes = test_data.bytecodes.iter().map(Vec::as_slice); vm.vm.insert_bytecodes(bytecodes); let txs_data = populate_mimic_calls(test_data.clone()); @@ -163,7 +150,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, calldata: data, value: U256::zero(), - factory_deps: None, + factory_deps: vec![], }, None, ); @@ -173,44 +160,25 @@ fn execute_test(test_data: L1MessengerTestData) -> 
TestStatistics { let result = vm.vm.execute(VmExecutionMode::OneTx); assert!( !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {:#?}", - test_data + "Transaction {i} wasn't successful for input: {test_data:#?}" ); } - // Now we count how much ergs were spent at the end of the batch + // Now we count how much gas was spent at the end of the batch // It is assumed that the top level frame is the bootloader + vm.vm.enforce_state_diffs(test_data.state_diffs.clone()); + let gas_before = vm.vm.gas_remaining(); - let ergs_before = vm.vm.gas_remaining(); - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - + let result = vm.vm.execute(VmExecutionMode::Batch); assert!( !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.gas_remaining(); - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used + "Batch wasn't successful for input: {test_data:?}" ); + let gas_after = vm.vm.gas_remaining(); + assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); TestStatistics { - max_used_gas: ergs_before - ergs_after, + max_used_gas: gas_before - gas_after, circuit_statistics: result.statistics.circuit_statistic.total() as u64, execution_metrics_size: result.get_execution_metrics(None).size() as u64, } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs index c698d36683ef..5c1158a5909d 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -1,7 +1,9 @@ +use assert_matches::assert_matches; use zksync_types::U256; +use zksync_vm2::HeapId; use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, versions::vm_fast::tests::{ tester::VmTesterBuilder, utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, @@ -24,10 +26,7 @@ fn test_dummy_bootloader() { let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.inner.state, - vec![(correct_first_cell, vm2::FIRST_HEAP, 0)], - ); + verify_required_memory(&vm.vm.inner, vec![(correct_first_cell, HeapId::FIRST, 0)]); } #[test] @@ -44,10 +43,10 @@ fn test_bootloader_out_of_gas() { let res = vm.vm.execute(VmExecutionMode::Batch); - assert!(matches!( + assert_matches!( res.result, ExecutionResult::Halt { reason: Halt::BootloaderOutOfGas } - )); + ); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs index 56c20e785ee6..3070140c00b3 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs @@ -1,5 +1,5 @@ use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt}, utils::bytecode, vm_fast::tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, diff --git 
a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs index c582bd28c882..0270ac35475b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -1,17 +1,16 @@ use zksync_types::{Address, Execute, U256}; +use super::tester::VmTesterBuilder; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; // Checks that estimated number of circuits for simple transfer doesn't differ much // from hardcoded expected value. #[test] fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_random_rich_accounts(1) .with_deployer() @@ -25,12 +24,12 @@ fn test_circuits() { contract_address: Address::random(), calldata: Vec::new(), value: U256::from(1u8), - factory_deps: None, + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let res = vm.vm.inspect((), VmExecutionMode::OneTx); let s = res.statistics.circuit_statistic; // Check `circuit_statistic`. diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 24fda3beed4b..caea07617ddb 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -5,10 +5,13 @@ use zksync_types::{ use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_fast::tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + vm_fast::{ + circuits_tracer::CircuitsTracer, + tests::{ + tester::{get_empty_storage, VmTesterBuilder}, + utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, + }, }, }; @@ -207,11 +210,11 @@ fn refunds_in_code_oracle() { let account = &mut vm.rich_accounts[0]; if decommit { - let (_, is_fresh) = vm - .vm - .inner - .world_diff - .decommit_opcode(&mut vm.vm.world, h256_to_u256(normal_zkevm_bytecode_hash)); + let (_, is_fresh) = vm.vm.inner.world_diff_mut().decommit_opcode( + &mut vm.vm.world, + &mut CircuitsTracer::default(), + h256_to_u256(normal_zkevm_bytecode_hash), + ); assert!(is_fresh); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs index 460c8251652b..c2ce02d39fe1 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs @@ -7,7 +7,7 @@ use zksync_types::{ use zksync_utils::u256_to_h256; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_fast::tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, utils::{get_balance, read_test_contract, verify_required_storage}, @@ -61,7 +61,7 @@ fn test_default_aa_interaction() { verify_required_storage( &expected_slots, &mut vm.vm.world.storage, - vm.vm.inner.world_diff.get_storage_state(), + 
vm.vm.inner.world_diff().get_storage_state(), ); let expected_fee = maximal_fee @@ -71,7 +71,7 @@ fn test_default_aa_interaction() { AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), &vm.fee_account, &mut vm.vm.world.storage, - vm.vm.inner.world_diff.get_storage_state(), + vm.vm.inner.world_diff().get_storage_state(), ); assert_eq!( diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index e0c55c5a685a..b7a2154bdc71 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -27,7 +27,7 @@ fn test_tx_gas_limit_offset() { vm.vm.push_transaction(tx); - assert!(vm.vm.inner.state.previous_frames.is_empty()); + assert!(!vm.vm.has_previous_far_calls()); let gas_limit_from_memory = vm .vm .read_word_from_bootloader_heap(TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 1bfc2f8ff11f..3fcef71add07 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -1,17 +1,23 @@ -use std::collections::HashSet; +use std::{collections::HashSet, iter}; +use assert_matches::assert_matches; +use ethabi::Token; use itertools::Itertools; +use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_test_account::Account; -use zksync_types::{Execute, U256}; +use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; use crate::{ - interface::{storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ + storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, + }, vm_fast::{ tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, + tester::{TxType, VmTester, VmTesterBuilder}, + utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, }, vm::Vm, }, @@ -88,8 +94,138 @@ fn known_bytecodes_without_aa_code(vm: &Vm) -> HashSet .keys() .cloned() .collect::>(); - known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - known_bytecodes_without_aa_code } + +/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial +/// decommitment cost (>10,000 gas). 
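/// (Decommitment is priced per 32-byte word of bytecode, so the 10,000 appended
/// 8-byte NOP encodings add roughly 2,500 words that must be paid for on the
/// first far call to the contract.)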
+fn inflated_counter_bytecode() -> Vec { + let mut counter_bytecode = read_test_contract(); + counter_bytecode.extend( + iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) + .take(10_000) + .flatten(), + ); + counter_bytecode +} + +#[derive(Debug)] +struct ProxyCounterData { + proxy_counter_address: Address, + counter_bytecode_hash: U256, +} + +fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionResultAndLogs) { + let counter_bytecode = inflated_counter_bytecode(); + let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); + let counter_address = Address::repeat_byte(0x23); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx( + &proxy_counter_bytecode, + Some(&[Token::Address(counter_address)]), + TxType::L2, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); + assert!( + !decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: deploy_tx.address, + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + let data = ProxyCounterData { + proxy_counter_address: deploy_tx.address, + counter_bytecode_hash, + }; + (vm, data, exec_result) +} + +#[test] +fn get_used_contracts_with_far_call() { + let (vm, data, exec_result) = execute_proxy_counter(100_000); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); + assert!( + decommitted_hashes.contains(&data.counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} + +#[test] +fn get_used_contracts_with_out_of_gas_far_call() { + let (mut vm, data, exec_result) = execute_proxy_counter(10_000); + assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); + let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); + assert!( + decommitted_hashes.contains(&data.counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + // Execute another transaction with a successful far call and check that it's still charged for decommitment. 
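// (The decommit in the out-of-gas frame above was rolled back together with
// that frame, so the follow-up call below pays the full decommitment price
// again; the proxy contract presumably records the gas consumed by its far
// call in storage slot 1, which the assertions below read back.)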
+ let account = &mut vm.rich_accounts[0]; + let (_, proxy_counter_abi) = read_proxy_counter_contract(); + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: data.proxy_counter_address, + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let proxy_counter_cost_key = StorageKey::new( + AccountTreeId::new(data.proxy_counter_address), + H256::from_low_u64_be(1), + ); + let far_call_cost_log = exec_result + .logs + .storage_logs + .iter() + .find(|log| log.log.key == proxy_counter_cost_key) + .expect("no cost log"); + assert!( + far_call_cost_log.previous_value.is_zero(), + "{far_call_cost_log:?}" + ); + let far_call_cost = h256_to_u256(far_call_cost_log.log.value); + assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs index ff97c0389aa9..df8d992f02fe 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs @@ -1,7 +1,9 @@ use zksync_types::get_nonce_key; use crate::{ - interface::{storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ + storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, + }, vm_fast::tests::{ tester::{Account, TxType, VmTesterBuilder}, utils::read_test_contract, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index f1411497c24c..3b58565098d5 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -9,7 +9,7 @@ use zksync_types::{ use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, utils::StorageWritesDeduplicator, vm_fast::{ tests::{ @@ -82,7 +82,7 @@ fn test_l1_tx_execution() { ] { assert_eq!( expected_value, - vm.vm.inner.world_diff.get_storage_state()[&( + vm.vm.inner.world_diff().get_storage_state()[&( *storage_location.address(), h256_to_u256(*storage_location.key()) )] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index 6ff5ed426cba..a374f63608bc 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -16,7 +16,7 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::{ interface::{ storage::ReadStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterface, + VmInterface, VmInterfaceExt, }, vm_fast::{ tests::tester::{default_l1_batch, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs index 9d5b229f23a9..730c573cdcf4 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ 
-1,9 +1,9 @@ +mod block_tip; mod bootloader; -mod default_aa; -//mod block_tip; FIXME: requires vm metrics mod bytecode_publishing; +mod default_aa; // mod call_tracer; FIXME: requires tracers -// mod circuits; FIXME: requires tracers / circuit stats +mod circuits; mod code_oracle; mod gas_limit; mod get_used_contracts; @@ -11,7 +11,7 @@ mod is_write_initial; mod l1_tx_execution; mod l2_blocks; mod nonce_holder; -// mod precompiles; FIXME: requires tracers / circuit stats +mod precompiles; // mod prestate_tracer; FIXME: is pre-state tracer still relevant? mod refunds; mod require_eip712; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index b18676cf2ba6..122b38601175 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -2,7 +2,7 @@ use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; use crate::{ interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, + ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, VmRevertReason, }, vm_fast::tests::{ diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index 5bdf0930d558..f77eeb4f126e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -1,9 +1,9 @@ -use zk_evm_1_5_0::zk_evm_abstractions::precompiles::PrecompileAddress; +use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config; use zksync_types::{Address, Execute}; +use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_fast::tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -30,25 +30,18 @@ fn test_keccak() { Execute { contract_address: address, calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, + value: 0.into(), + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); + let keccak_count = exec_result.statistics.circuit_statistic.keccak256 + * get_geometry_config().cycles_per_keccak256_circuit as f32; + assert!(keccak_count >= 1000.0, "{keccak_count}"); } #[test] @@ -74,25 +67,18 @@ fn test_sha256() { Execute { contract_address: address, calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, + value: 0.into(), + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); + let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - 
assert!(sha_count >= 1000); + let sha_count = exec_result.statistics.circuit_statistic.sha256 + * get_geometry_config().cycles_per_sha256_circuit as f32; + assert!(sha_count >= 1000.0, "{sha_count}"); } #[test] @@ -110,24 +96,17 @@ fn test_ecrecover() { let tx = account.get_l2_tx_for_execute( Execute { contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, + calldata: vec![], + value: 0.into(), + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); + let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - assert_eq!(ecrecover_count, 1); + let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover + * get_geometry_config().cycles_per_ecrecover_circuit as f32; + assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}"); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 21a3129a3a61..5ad6e3fa4f3d 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -2,7 +2,7 @@ use ethabi::Token; use zksync_types::{Address, Execute, U256}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_fast::tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, utils::{read_expensive_contract, read_test_contract}, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index 352e709b7043..68e49b202a93 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -9,7 +9,9 @@ use zksync_types::{ use zksync_utils::h256_to_u256; use crate::{ - interface::{storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ + storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, + }, vm_fast::tests::{ tester::{Account, VmTester, VmTesterBuilder}, utils::read_many_owners_custom_account_contract, @@ -24,7 +26,7 @@ impl VmTester { ); self.vm .inner - .world_diff + .world_diff() .get_storage_state() .get(&(L2_BASE_TOKEN_ADDRESS, h256_to_u256(*key.key()))) .copied() diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index c530c5af18ea..a677a61c6029 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -56,7 +56,7 @@ fn test_vm_rollbacks() { TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), ]); - assert_eq!(result_without_rollbacks, result_with_rollbacks); + pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); } #[test] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs index 76357d44cf38..a61a0a2bd91c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs @@ -4,7 +4,7 
@@ use zksync_types::{web3::keccak256, Execute, H256, U256}; use zksync_utils::h256_to_u256; use crate::{ - interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_fast::tests::tester::VmTesterBuilder, }; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs index 7d866e1539b0..8c916a541e21 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs @@ -1,5 +1,7 @@ +use assert_matches::assert_matches; + use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, + interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_fast::tests::tester::{TxType, VmTesterBuilder}, }; @@ -25,7 +27,7 @@ fn estimate_fee() { vm_tester.vm.push_transaction(tx); let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert!(matches!(result.result, ExecutionResult::Success { .. })); + assert_matches!(result.result, ExecutionResult::Success { .. }); } #[test] @@ -68,11 +70,11 @@ fn simple_execute() { vm.push_transaction(tx2); vm.push_transaction(tx3); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Revert { .. })); + assert_matches!(tx.result, ExecutionResult::Revert { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let block_tip = vm.execute(VmExecutionMode::Batch); - assert!(matches!(block_tip.result, ExecutionResult::Success { .. })); + assert_matches!(block_tip.result, ExecutionResult::Success { .. 
}); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index 733ce1f0618c..7fe15ca7bcd2 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -3,7 +3,9 @@ use zksync_contracts::{load_contract, read_bytecode}; use zksync_types::{Address, Execute, U256}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled}, + interface::{ + TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + }, vm_fast::tests::tester::VmTesterBuilder, }; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index 562a8a6a6bdd..ce45390260c5 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -1,11 +1,13 @@ +use std::fmt; + use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; use super::VmTester; use crate::{ interface::{ storage::ReadStorage, CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, - VmRevertReason, + VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, + VmInterfaceHistoryEnabled, VmRevertReason, }, vm_fast::Vm, }; @@ -184,25 +186,33 @@ impl TransactionTestInfo { } // TODO this doesn't include all the state of ModifiedWorld -#[derive(Debug, PartialEq)] -struct VmStateDump { - state: vm2::State, +#[derive(Debug)] +struct VmStateDump { + state: S, storage_writes: Vec<((H160, U256), U256)>, - events: Box<[vm2::Event]>, + events: Box<[zksync_vm2::Event]>, +} + +impl PartialEq for VmStateDump { + fn eq(&self, other: &Self) -> bool { + self.state == other.state + && self.storage_writes == other.storage_writes + && self.events == other.events + } } impl Vm { - fn dump_state(&self) -> VmStateDump { + fn dump_state(&self) -> VmStateDump { VmStateDump { - state: self.inner.state.clone(), + state: self.inner.dump_state(), storage_writes: self .inner - .world_diff + .world_diff() .get_storage_state() .iter() .map(|(k, v)| (*k, *v)) .collect(), - events: self.inner.world_diff.events().into(), + events: self.inner.world_diff().events().into(), } } } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs index efab73aed1df..8071bcf51d4a 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs @@ -1,6 +1,5 @@ use std::{cell::RefCell, rc::Rc}; -use vm2::WorldDiff; use zksync_contracts::BaseSystemContracts; use zksync_test_account::{Account, TxType}; use zksync_types::{ @@ -13,11 +12,13 @@ use zksync_types::{ StorageKey, U256, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; +use zksync_vm2::WorldDiff; use crate::{ interface::{ storage::{InMemoryStorage, StoragePtr}, L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, + VmInterfaceExt, }, versions::vm_fast::{tests::utils::read_test_contract, vm::Vm}, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, utils::l2_blocks::load_last_l2_block}, @@ -52,7 +53,7 @@ impl VmTester { pub(crate) fn reset_with_empty_storage(&mut self) { self.storage = 
Rc::new(RefCell::new(get_empty_storage())); - self.vm.inner.world_diff = WorldDiff::default(); + *self.vm.inner.world_diff_mut() = WorldDiff::default(); self.reset_state(false); } @@ -77,7 +78,7 @@ impl VmTester { { let mut storage = storage.borrow_mut(); // Commit pending storage changes (old VM versions commit them on successful execution) - for (&(address, slot), &value) in self.vm.inner.world_diff.get_storage_state() { + for (&(address, slot), &value) in self.vm.inner.world_diff().get_storage_state() { let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(slot)); storage.set_value(key, u256_to_h256(value)); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index 3b61b8ac7f1e..57877854031d 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -5,7 +5,7 @@ use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, E use zksync_utils::u256_to_h256; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_fast::tests::{ tester::{get_empty_storage, VmTesterBuilder}, utils::get_balance, @@ -92,7 +92,7 @@ fn test_send_or_transfer(test_option: TestOptions) { AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), &recipient_address, &mut vm.vm.world.storage, - vm.vm.inner.world_diff.get_storage_state(), + vm.vm.inner.world_diff().get_storage_state(), ); assert_eq!(new_recipient_balance, value); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs index 616436776090..dd25c2097405 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -12,7 +12,7 @@ use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; use crate::{ interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, + ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, vm_fast::tests::{ @@ -164,7 +164,7 @@ fn test_force_deploy_upgrade() { verify_required_storage( &expected_slots, &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff.get_storage_state(), + vm.vm.inner.world_diff().get_storage_state(), ); } @@ -223,7 +223,7 @@ fn test_complex_upgrader() { verify_required_storage( &expected_slots, &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff.get_storage_state(), + vm.vm.inner.world_diff().get_storage_state(), ); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index 6b17e66f2616..d91e13076514 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -2,7 +2,6 @@ use std::collections::BTreeMap; use ethabi::Contract; use once_cell::sync::Lazy; -use vm2::{instruction_handlers::HeapInterface, HeapId, State}; use zksync_contracts::{ load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, }; @@ -11,15 +10,19 @@ use zksync_types::{ U256, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; +use zksync_vm2::{HeapId, StateInterface}; use crate::interface::storage::ReadStorage; pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); -pub(crate) fn 
verify_required_memory(state: &State, required_values: Vec<(U256, HeapId, u32)>) { +pub(crate) fn verify_required_memory( + state: &impl StateInterface, + required_values: Vec<(U256, HeapId, u32)>, +) { for (required_value, memory_page, cell) in required_values { - let current_value = state.heaps[memory_page].read_u256(cell * 32); + let current_value = state.read_heap_u256(memory_page, cell * 32); assert_eq!(current_value, required_value); } } @@ -127,3 +130,8 @@ pub(crate) fn read_expensive_contract() -> (Vec, Contract) { "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; (read_bytecode(PATH), load_contract(PATH)) } + +pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { + const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; + (read_bytecode(PATH), load_contract(PATH)) +} diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index a9b2fcd605c9..5a73ce49b06c 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -1,9 +1,5 @@ use std::{collections::HashMap, fmt}; -use vm2::{ - decode::decode_program, fat_pointer::FatPointer, instruction_handlers::HeapInterface, - ExecutionEnd, Program, Settings, VirtualMachine, -}; use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION; use zksync_contracts::SystemContractCode; use zksync_types::{ @@ -19,10 +15,15 @@ use zksync_types::{ L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; +use zksync_vm2::{ + decode::decode_program, CallframeInterface, ExecutionEnd, FatPointer, HeapId, Program, + Settings, StateInterface, Tracer, VirtualMachine, +}; use super::{ bootloader_state::{BootloaderState, BootloaderStateSnapshot}, bytecode::compress_bytecodes, + circuits_tracer::CircuitsTracer, hook::Hook, initial_bootloader_memory::bootloader_initial_memory, transaction_data::TransactionData, @@ -30,7 +31,7 @@ use super::{ use crate::{ glue::GlueInto, interface::{ - storage::ReadStorage, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, + storage::ReadStorage, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, @@ -55,20 +56,22 @@ use crate::{ const VM_VERSION: MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemory; pub struct Vm { - pub(crate) world: World, - pub(crate) inner: VirtualMachine, - suspended_at: u16, + pub(crate) world: World, + pub(crate) inner: VirtualMachine>, gas_for_account_validation: u32, pub(crate) bootloader_state: BootloaderState, pub(crate) batch_env: L1BatchEnv, pub(crate) system_env: SystemEnv, snapshot: Option, + #[cfg(test)] + enforced_state_diffs: Option>, } impl Vm { fn run( &mut self, execution_mode: VmExecutionMode, + tracer: &mut CircuitsTracer, track_refunds: bool, ) -> (ExecutionResult, Refunds) { let mut refunds = Refunds { @@ -76,17 +79,11 @@ impl Vm { operator_suggested_refund: 0, }; let mut last_tx_result = None; - let mut pubdata_before = self.inner.world_diff.pubdata() as u32; + let mut pubdata_before = self.inner.world_diff().pubdata() as u32; let result = loop { - let hook = match self.inner.resume_from(self.suspended_at, &mut self.world) { - 
ExecutionEnd::SuspendedOnHook { - hook, - pc_to_resume_from, - } => { - self.suspended_at = pc_to_resume_from; - hook - } + let hook = match self.inner.run(&mut self.world, tracer) { + ExecutionEnd::SuspendedOnHook(hook) => hook, ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, ExecutionEnd::Reverted(output) => { break match TxRevertReason::parse_error(&output) { @@ -128,7 +125,7 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.world_diff.pubdata() as u32; + let pubdata_published = self.inner.world_diff().pubdata() as u32; refunds.operator_suggested_refund = compute_refund( &self.batch_env, @@ -164,10 +161,7 @@ impl Vm { let result = self.get_hook_params()[0]; let value = self.get_hook_params()[1]; let fp = FatPointer::from(value); - assert_eq!(fp.offset, 0); - - let return_data = self.inner.state.heaps[fp.memory_page] - .read_range_big_endian(fp.start..fp.start + fp.length); + let return_data = self.read_bytes_from_heap(fp); last_tx_result = Some(if result.is_zero() { ExecutionResult::Revert { @@ -193,7 +187,7 @@ impl Vm { } let events = - merge_events(self.inner.world_diff.events(), self.batch_env.number); + merge_events(self.inner.world_diff().events(), self.batch_env.number); let published_bytecodes = events .iter() @@ -218,10 +212,7 @@ impl Vm { user_logs: extract_l2tol1logs_from_l1_messenger(&events), l2_to_l1_messages: VmEvent::extract_long_l2_to_l1_messages(&events), published_bytecodes, - state_diffs: self - .compute_state_diffs() - .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) - .collect(), + state_diffs: self.compute_state_diffs(), }; // Save the pubdata for the future initial bootloader memory building @@ -236,7 +227,13 @@ impl Vm { } Hook::PaymasterValidationEntered | Hook::ValidationStepEnded => { /* unused */ } - Hook::DebugLog | Hook::DebugReturnData | Hook::NearCallCatch => { + Hook::DebugLog => { + let (log, log_arg) = self.get_debug_log(); + let last_tx = self.bootloader_state.last_l2_block().txs.last(); + let tx_hash = last_tx.map(|tx| tx.hash); + tracing::trace!(tx = ?tx_hash, "{log}: {log_arg}"); + } + Hook::DebugReturnData | Hook::NearCallCatch => { // These hooks are for debug purposes only } } @@ -254,9 +251,42 @@ impl Vm { .unwrap() } + fn get_debug_log(&self) -> (String, String) { + let hook_params = self.get_hook_params(); + let mut msg = u256_to_h256(hook_params[0]).as_bytes().to_vec(); + // Trim 0 byte padding at the end. + while msg.last() == Some(&0) { + msg.pop(); + } + + let data = hook_params[1]; + let msg = String::from_utf8(msg).expect("Invalid debug message"); + + // For long data, it is better to use hex-encoding for greater readability + let data_str = if data > U256::from(u64::MAX) { + format!("0x{data:x}") + } else { + data.to_string() + }; + (msg, data_str) + } + /// Should only be used when the bootloader is executing (e.g., when handling hooks). 
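/// (`HeapId::FIRST` addresses the bootloader heap, so these word offsets are
/// only meaningful while the bootloader's root frame is executing;
/// `write_to_bootloader_heap` below enforces this with an assertion on
/// `has_previous_far_calls()`.)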
pub(crate) fn read_word_from_bootloader_heap(&self, word: usize) -> U256 { - self.inner.state.heaps[vm2::FIRST_HEAP].read_u256(word as u32 * 32) + let start_address = word as u32 * 32; + self.inner.read_heap_u256(HeapId::FIRST, start_address) + } + + fn read_bytes_from_heap(&self, ptr: FatPointer) -> Vec { + assert_eq!(ptr.offset, 0); + (ptr.start..ptr.start + ptr.length) + .map(|addr| self.inner.read_heap_byte(ptr.memory_page, addr)) + .collect() + } + + pub(crate) fn has_previous_far_calls(&mut self) -> bool { + let callframe_count = self.inner.number_of_callframes(); + (1..callframe_count).any(|i| !self.inner.callframe(i).is_near_call()) } /// Should only be used when the bootloader is executing (e.g., when handling hooks). @@ -264,12 +294,15 @@ impl Vm { &mut self, memory: impl IntoIterator, ) { - assert!(self.inner.state.previous_frames.is_empty()); + assert!( + !self.has_previous_far_calls(), + "Cannot write to bootloader heap when not in root call frame" + ); + for (slot, value) in memory { + let start_address = slot as u32 * 32; self.inner - .state - .heaps - .write_u256(vm2::FIRST_HEAP, slot as u32 * 32, value); + .write_heap_u256(HeapId::FIRST, start_address, value); } } @@ -297,7 +330,7 @@ impl Vm { } else { compress_bytecodes(&tx.factory_deps, |hash| { self.inner - .world_diff + .world_diff() .get_storage_state() .get(&(KNOWN_CODES_STORAGE_ADDRESS, h256_to_u256(hash))) .map(|x| !x.is_zero()) @@ -319,10 +352,19 @@ impl Vm { self.write_to_bootloader_heap(memory); } - fn compute_state_diffs(&mut self) -> impl Iterator + '_ { - let storage = &mut self.world.storage; + #[cfg(test)] + pub(super) fn enforce_state_diffs(&mut self, diffs: Vec) { + self.enforced_state_diffs = Some(diffs); + } - self.inner.world_diff.get_storage_changes().map( + fn compute_state_diffs(&mut self) -> Vec { + #[cfg(test)] + if let Some(enforced_diffs) = self.enforced_state_diffs.take() { + return enforced_diffs; + } + + let storage = &mut self.world.storage; + let diffs = self.inner.world_diff().get_storage_changes().map( move |((address, key), (initial_value, final_value))| { let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); StateDiffRecord { @@ -339,11 +381,18 @@ impl Vm { final_value, } }, - ) + ); + diffs + .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) + .collect() } pub(crate) fn decommitted_hashes(&self) -> impl Iterator + '_ { - self.inner.world_diff.decommitted_hashes() + self.inner.world_diff().decommitted_hashes() + } + + pub(super) fn gas_remaining(&mut self) -> u32 { + self.inner.current_frame().gas() } } @@ -357,13 +406,15 @@ impl Vm { .hash .into(); - let program_cache = HashMap::from([convert_system_contract_code( + let program_cache = HashMap::from([World::convert_system_contract_code( &system_env.base_system_smart_contracts.default_aa, false, )]); - let (_, bootloader) = - convert_system_contract_code(&system_env.base_system_smart_contracts.bootloader, true); + let (_, bootloader) = World::convert_system_contract_code( + &system_env.base_system_smart_contracts.bootloader, + true, + ); let bootloader_memory = bootloader_initial_memory(&batch_env); let mut inner = VirtualMachine::new( @@ -380,17 +431,17 @@ impl Vm { }, ); - inner.state.current_frame.sp = 0; - + inner.current_frame().set_stack_pointer(0); // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it. 
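// (Presumably this also sidesteps heap growth charges: with both bounds already
// at `u32::MAX`, the bootloader never pays to extend its heap or aux heap.)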
- inner.state.current_frame.heap_size = u32::MAX; - inner.state.current_frame.aux_heap_size = u32::MAX; - inner.state.current_frame.exception_handler = INITIAL_FRAME_FORMAL_EH_LOCATION; + inner.current_frame().set_heap_bound(u32::MAX); + inner.current_frame().set_aux_heap_bound(u32::MAX); + inner + .current_frame() + .set_exception_handler(INITIAL_FRAME_FORMAL_EH_LOCATION); - let mut me = Self { + let mut this = Self { world: World::new(storage, program_cache), inner, - suspended_at: 0, gas_for_account_validation: system_env.default_validation_computational_gas_limit, bootloader_state: BootloaderState::new( system_env.execution_mode, @@ -400,15 +451,48 @@ impl Vm { system_env, batch_env, snapshot: None, + #[cfg(test)] + enforced_state_diffs: None, }; + this.write_to_bootloader_heap(bootloader_memory); + this + } + + // visible for testing + pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { + let world_diff = self.inner.world_diff(); + let events = merge_events(world_diff.events(), self.batch_env.number); - me.write_to_bootloader_heap(bootloader_memory); + let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) + .into_iter() + .map(Into::into) + .map(UserL2ToL1Log) + .collect(); - me + CurrentExecutionState { + events, + deduplicated_storage_logs: world_diff + .get_storage_changes() + .map(|((address, key), (_, value))| StorageLog { + key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)), + value: u256_to_h256(value), + kind: StorageLogKind::RepeatedWrite, // Initialness doesn't matter here + }) + .collect(), + used_contract_hashes: self.decommitted_hashes().collect(), + system_logs: world_diff + .l2_to_l1_logs() + .iter() + .map(|x| x.glue_into()) + .collect(), + user_l2_to_l1_logs, + storage_refunds: world_diff.storage_refunds().to_vec(), + pubdata_costs: world_diff.pubdata_costs().to_vec(), + } } fn delete_history_if_appropriate(&mut self) { - if self.snapshot.is_none() && self.inner.state.previous_frames.is_empty() { + if self.snapshot.is_none() && !self.has_previous_far_calls() { self.inner.delete_history(); } } @@ -433,10 +517,12 @@ impl VmInterface for Vm { track_refunds = true; } - let start = self.inner.world_diff.snapshot(); - let pubdata_before = self.inner.world_diff.pubdata(); + let mut tracer = CircuitsTracer::default(); + let start = self.inner.world_diff().snapshot(); + let pubdata_before = self.inner.world_diff().pubdata(); + let gas_before = self.gas_remaining(); - let (result, refunds) = self.run(execution_mode, track_refunds); + let (result, refunds) = self.run(execution_mode, &mut tracer, track_refunds); let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) && matches!(result, ExecutionResult::Halt { .. 
}); @@ -447,7 +533,7 @@ impl VmInterface for Vm { } else { let storage_logs = self .inner - .world_diff + .world_diff() .get_storage_changes_after(&start) .map(|((address, key), change)| StorageLogWithPreviousValue { log: StorageLog { @@ -463,7 +549,7 @@ impl VmInterface for Vm { }) .collect(); let events = merge_events( - self.inner.world_diff.events_after(&start), + self.inner.world_diff().events_after(&start), self.batch_env.number, ); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) @@ -473,7 +559,7 @@ impl VmInterface for Vm { .collect(); let system_l2_to_l1_logs = self .inner - .world_diff + .world_diff() .l2_to_l1_logs_after(&start) .iter() .map(|x| x.glue_into()) @@ -487,7 +573,9 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.world_diff.pubdata(); + let pubdata_after = self.inner.world_diff().pubdata(); + let circuit_statistic = tracer.circuit_statistic(); + let gas_remaining = self.gas_remaining(); VmExecutionResultAndLogs { result, logs, @@ -495,12 +583,12 @@ impl VmInterface for Vm { statistics: VmExecutionStatistics { contracts_used: 0, cycles_used: 0, - gas_used: 0, - gas_remaining: 0, + gas_used: (gas_before - gas_remaining).into(), + gas_remaining, computational_gas_used: 0, total_log_queries: 0, pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, - circuit_statistic: Default::default(), + circuit_statistic, }, refunds, } @@ -511,77 +599,33 @@ impl VmInterface for Vm { (): Self::TracerDispatcher, tx: zksync_types::Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_inner(tx, 0, with_compression); let result = self.inspect((), VmExecutionMode::OneTx); let compression_result = if self.has_unpublished_bytecodes() { Err(BytecodeCompressionError::BytecodeCompressionFailed) } else { - Ok(()) + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()) }; (compression_result, result) } - fn get_bootloader_memory(&self) -> BootloaderMemory { - self.bootloader_state.bootloader_memory() - } - - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.bootloader_state.get_last_tx_compressed_bytecodes() - } - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { self.bootloader_state.start_new_l2_block(l2_block_env) } - fn get_current_execution_state(&self) -> CurrentExecutionState { - let world_diff = &self.inner.world_diff; - let events = merge_events(world_diff.events(), self.batch_env.number); - - let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) - .into_iter() - .map(Into::into) - .map(UserL2ToL1Log) - .collect(); - - CurrentExecutionState { - events, - deduplicated_storage_logs: world_diff - .get_storage_changes() - .map(|((address, key), (_, value))| StorageLog { - key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)), - value: u256_to_h256(value), - kind: StorageLogKind::RepeatedWrite, // Initialness doesn't matter here - }) - .collect(), - used_contract_hashes: self.decommitted_hashes().collect(), - system_logs: world_diff - .l2_to_l1_logs() - .iter() - .map(|x| x.glue_into()) - .collect(), - user_l2_to_l1_logs, - storage_refunds: world_diff.storage_refunds().to_vec(), - pubdata_costs: world_diff.pubdata_costs().to_vec(), - } - } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { todo!("Unused during batch execution") } - fn gas_remaining(&self) -> u32 { - self.inner.state.current_frame.gas - } - fn 
finish_batch(&mut self) -> FinishedL1Batch { - let result = self.execute(VmExecutionMode::Batch); + let result = self.inspect((), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.get_bootloader_memory(); + let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, @@ -604,9 +648,8 @@ impl VmInterface for Vm { #[derive(Debug)] struct VmSnapshot { - vm_snapshot: vm2::Snapshot, + vm_snapshot: zksync_vm2::Snapshot, bootloader_snapshot: BootloaderStateSnapshot, - suspended_at: u16, gas_for_account_validation: u32, } @@ -621,7 +664,6 @@ impl VmInterfaceHistoryEnabled for Vm { self.snapshot = Some(VmSnapshot { vm_snapshot: self.inner.snapshot(), bootloader_snapshot: self.bootloader_state.get_snapshot(), - suspended_at: self.suspended_at, gas_for_account_validation: self.gas_for_account_validation, }); } @@ -630,13 +672,11 @@ impl VmInterfaceHistoryEnabled for Vm { let VmSnapshot { vm_snapshot, bootloader_snapshot, - suspended_at, gas_for_account_validation, } = self.snapshot.take().expect("no snapshots to rollback to"); self.inner.rollback(vm_snapshot); self.bootloader_state.apply_snapshot(bootloader_snapshot); - self.suspended_at = suspended_at; self.gas_for_account_validation = gas_for_account_validation; self.delete_history_if_appropriate(); @@ -651,7 +691,6 @@ impl VmInterfaceHistoryEnabled for Vm { impl fmt::Debug for Vm { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Vm") - .field("suspended_at", &self.suspended_at) .field( "gas_for_account_validation", &self.gas_for_account_validation, @@ -667,50 +706,59 @@ impl fmt::Debug for Vm { } #[derive(Debug)] -pub(crate) struct World { +pub(crate) struct World { pub(crate) storage: S, - // TODO (PLA-1008): Store `Program`s in an LRU cache - program_cache: HashMap, + program_cache: HashMap>, pub(crate) bytecode_cache: HashMap>, } -impl World { - fn new(storage: S, program_cache: HashMap) -> Self { +impl World { + fn new(storage: S, program_cache: HashMap>) -> Self { Self { storage, program_cache, bytecode_cache: Default::default(), } } -} -impl vm2::World for World { - fn decommit_code(&mut self, hash: U256) -> Vec { - self.decommit(hash) - .code_page() - .as_ref() - .iter() - .flat_map(|u| { - let mut buffer = [0u8; 32]; - u.to_big_endian(&mut buffer); - buffer - }) - .collect() + fn bytecode_to_program(bytecode: &[u8]) -> Program { + Program::new( + decode_program( + &bytecode + .chunks_exact(8) + .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) + .collect::>(), + false, + ), + bytecode + .chunks_exact(32) + .map(U256::from_big_endian) + .collect::>(), + ) } - fn decommit(&mut self, hash: U256) -> Program { - self.program_cache - .entry(hash) - .or_insert_with(|| { - bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { - self.storage - .load_factory_dep(u256_to_h256(hash)) - .expect("vm tried to decommit nonexistent bytecode") - })) - }) - .clone() + fn convert_system_contract_code( + code: &SystemContractCode, + is_bootloader: bool, + ) -> (U256, Program) { + ( + h256_to_u256(code.hash), + Program::new( + decode_program( + &code + .code + .iter() + .flat_map(|x| x.0.into_iter().rev()) + .collect::>(), + is_bootloader, + ), + code.code.clone(), + ), + ) } +} +impl zksync_vm2::StorageInterface for World { fn read_storage(&mut self, contract: H160, key: U256) -> Option { let key = &StorageKey::new(AccountTreeId::new(contract), 
u256_to_h256(key)); if self.storage.is_write_initial(key) { @@ -755,35 +803,30 @@ impl vm2::World for World { } } -fn bytecode_to_program(bytecode: &[u8]) -> Program { - Program::new( - decode_program( - &bytecode - .chunks_exact(8) - .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) - .collect::<Vec<_>>(), - false, - ), - bytecode - .chunks_exact(32) - .map(U256::from_big_endian) - .collect::<Vec<_>>(), - ) -} +impl zksync_vm2::World for World { + fn decommit(&mut self, hash: U256) -> Program { + self.program_cache + .entry(hash) + .or_insert_with(|| { + Self::bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { + self.storage + .load_factory_dep(u256_to_h256(hash)) + .expect("vm tried to decommit nonexistent bytecode") + })) + }) + .clone() + } -fn convert_system_contract_code(code: &SystemContractCode, is_bootloader: bool) -> (U256, Program) { - ( - h256_to_u256(code.hash), - Program::new( - decode_program( - &code - .code - .iter() - .flat_map(|x| x.0.into_iter().rev()) - .collect::<Vec<_>>(), - is_bootloader, - ), - code.code.clone(), - ), - ) + fn decommit_code(&mut self, hash: U256) -> Vec<u8> { + self.decommit(hash) + .code_page() + .as_ref() + .iter() + .flat_map(|u| { + let mut buffer = [0u8; 32]; + u.to_big_endian(&mut buffer); + buffer + }) + .collect() + } } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index f15199a74f84..4ba27b14bad6 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec<CompressedBytecodeInfo> { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs index d0a41ce69f42..2cd98c8e58a3 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs @@ -5,7 +5,7 @@ use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - CompressedBytecodeInfo, VmInterface, + CompressedBytecodeInfo, }, utils::bytecode, vm_latest::Vm, @@ -15,15 +15,18 @@ use crate::{ impl Vm { /// Checks whether the last transaction has successfully published its compressed bytecodes; returns `true` if at least one of them is still unknown.
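The `.into()` conversions in the hunks that follow, together with the `&[CompressedBytecodeInfo]` accessor above, suggest that the new `BytecodeCompressionResult<'_>` wraps the compressed bytecodes in a `Cow`: success can borrow the slice cached in the bootloader state, while VM versions without compression return an owned empty vector. A sketch under that assumption (the alias and stand-in types are illustrative, not the exact `zksync_vm_interface` definitions):

    use std::borrow::Cow;

    #[derive(Clone, Debug)]
    struct CompressedBytecodeInfo;
    #[derive(Debug)]
    enum BytecodeCompressionError { BytecodeCompressionFailed }
    type BytecodeCompressionResult<'a> =
        Result<Cow<'a, [CompressedBytecodeInfo]>, BytecodeCompressionError>;

    // Success borrows the bytecodes cached in the bootloader state...
    fn compressed<'a>(cached: &'a [CompressedBytecodeInfo]) -> BytecodeCompressionResult<'a> {
        Ok(cached.into())
    }

    // ...or hands back an owned (possibly empty) vector, e.g. for VM versions
    // that never compress bytecodes.
    fn unsupported() -> BytecodeCompressionResult<'static> {
        Ok(vec![].into())
    }
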
pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { - self.get_last_tx_compressed_bytecodes().iter().any(|info| { - !self - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) - }) + self.bootloader_state + .get_last_tx_compressed_bytecodes() + .iter() + .any(|info| { + !self + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(&info.original)) + }) } } diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index 4676fd82d5e2..66fc1a8bfd71 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -4,7 +4,7 @@ use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, VmExecutionStopReason}, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmExecutionMode, VmExecutionResultAndLogs, }, vm_latest::{ old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/gas.rs b/core/lib/multivm/src/versions/vm_latest/implementation/gas.rs index 1e33eecf6325..8d006a467795 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/gas.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/gas.rs @@ -1,8 +1,4 @@ -use crate::{ - interface::{storage::WriteStorage, VmInterface}, - vm_latest::vm::Vm, - HistoryMode, -}; +use crate::{interface::storage::WriteStorage, vm_latest::vm::Vm, HistoryMode}; impl Vm { pub(crate) fn calculate_computational_gas_used(&self, gas_remaining_before: u32) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index f1851eaae425..bed348afd2d9 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -15,7 +15,7 @@ use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u25 use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::{ constants::{ BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs index 4b60c1992025..9d23f658cb82 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs @@ -1,7 +1,8 @@ +use assert_matches::assert_matches; use zksync_types::U256; use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, vm_latest::{ constants::BOOTLOADER_HEAP_PAGE, tests::{ @@ -47,10 +48,10 @@ fn test_bootloader_out_of_gas() { let res = vm.vm.execute(VmExecutionMode::Batch); - assert!(matches!( + assert_matches!( res.result, ExecutionResult::Halt { reason: Halt::BootloaderOutOfGas } - )); + ); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs index ef56aafe4cbe..2ed9948af819 100644 --- 
a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs @@ -1,5 +1,5 @@ use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt}, utils::bytecode, vm_latest::{ tests::{ diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index 7174e9be67de..0708d67e27a3 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -9,7 +9,7 @@ use zksync_types::{ use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::{ tests::{ tester::{get_empty_storage, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs index 34297d991d10..aa3eb5e752ce 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs @@ -7,7 +7,7 @@ use zksync_types::{ use zksync_utils::u256_to_h256; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::{ tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index 752fd1a9087d..a42037a7f5be 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -1,9 +1,13 @@ use std::{ collections::{HashMap, HashSet}, + iter, str::FromStr, }; +use assert_matches::assert_matches; +use ethabi::Token; use itertools::Itertools; +use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zk_evm_1_5_0::{ abstractions::DecommittmentProcessor, aux_structures::{DecommittmentQuery, MemoryPage, Timestamp}, @@ -11,15 +15,19 @@ use zk_evm_1_5_0::{ }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_test_account::Account; -use zksync_types::{Execute, U256}; +use zksync_types::{Address, Execute, U256}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::VmExecutionResultAndLogs; use crate::{ - interface::{storage::WriteStorage, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ + storage::WriteStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, + VmInterfaceExt, + }, vm_latest::{ tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, + tester::{TxType, VmTester, VmTesterBuilder}, + utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, }, HistoryDisabled, Vm, }, @@ -148,10 +156,92 @@ fn known_bytecodes_without_aa_code( .known_bytecodes .inner() .clone(); - known_bytecodes_without_aa_code .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) .unwrap(); - known_bytecodes_without_aa_code } + +/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. 
This leads to non-trivial +/// decommitment cost (>10,000 gas). +fn inflated_counter_bytecode() -> Vec { + let mut counter_bytecode = read_test_contract(); + counter_bytecode.extend( + iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) + .take(10_000) + .flatten(), + ); + counter_bytecode +} + +fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) { + let counter_bytecode = inflated_counter_bytecode(); + let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); + let counter_address = Address::repeat_byte(0x23); + + let mut vm = VmTesterBuilder::new(HistoryDisabled) + .with_empty_in_memory_storage() + .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + + let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx( + &proxy_counter_bytecode, + Some(&[Token::Address(counter_address)]), + TxType::L2, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let decommitted_hashes = vm.vm.get_used_contracts(); + assert!( + !decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: deploy_tx.address, + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + (vm, counter_bytecode_hash, exec_result) +} + +#[test] +fn get_used_contracts_with_far_call() { + let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + let decommitted_hashes = vm.vm.get_used_contracts(); + assert!( + decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} + +#[test] +fn get_used_contracts_with_out_of_gas_far_call() { + let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000); + assert_matches!(exec_result.result, ExecutionResult::Revert { .. 
}); + let decommitted_hashes = vm.vm.get_used_contracts(); + assert!( + decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs index 900f322bc3f3..8206cfa9be6f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs @@ -1,7 +1,9 @@ use zksync_types::get_nonce_key; use crate::{ - interface::{storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ + storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, + }, vm_latest::{ tests::{ tester::{Account, TxType, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 4d42bb96cc96..dcb1bff06d09 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -10,7 +10,7 @@ use zksync_types::{ use zksync_utils::u256_to_h256; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, utils::StorageWritesDeduplicator, vm_latest::{ tests::{ diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs index 1f4c36bb25b7..1b5c3db59f72 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs @@ -17,7 +17,7 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::{ interface::{ storage::WriteStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterface, + VmInterface, VmInterfaceExt, }, vm_latest::{ constants::{ diff --git a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs b/core/lib/multivm/src/versions/vm_latest/tests/migration.rs index 6bd0e87615ed..5b8da2551808 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/migration.rs @@ -1,7 +1,7 @@ use zksync_types::{get_code_key, H256, SYSTEM_CONTEXT_ADDRESS}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::{ tests::{ tester::{get_empty_storage, DeployContractsTx, TxType, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 076ecb523618..661286ca9697 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -2,7 +2,7 @@ use zksync_types::{Execute, Nonce}; use crate::{ interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, + ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, VmRevertReason, }, vm_latest::{ diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index 893ca57bc4d1..eb3104fd637a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -5,7 +5,7 
@@ use zksync_test_account::TxType; use zksync_types::{utils::deployed_address_create, Execute, U256}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, tracers::PrestateTracer, vm_latest::{ constants::BATCH_COMPUTATIONAL_GAS_LIMIT, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index 52dbd6efb339..ca058d672d2e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -2,7 +2,7 @@ use ethabi::Token; use zksync_types::{Address, Execute, U256}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::{ tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index 5178c5dc29cf..779e9b5c629d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -8,7 +8,7 @@ use zksync_types::{ }; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::{ tests::{ tester::{Account, VmTester, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 489c762aac4e..43e7baae3b2d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -6,7 +6,7 @@ use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, + TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, tracers::dynamic::vm_1_5_0::DynTracer, vm_latest::{ diff --git a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs index 3cd50e0eb917..6cc731a1387c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs @@ -4,7 +4,7 @@ use zksync_types::{web3::keccak256, Execute, H256, U256}; use zksync_utils::h256_to_u256; use crate::{ - interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled}, }; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs index a864538524a2..cd020ee9f966 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs @@ -1,5 +1,7 @@ +use assert_matches::assert_matches; + use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, + interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::{ tests::tester::{TxType, VmTesterBuilder}, HistoryDisabled, @@ -28,7 +30,7 @@ fn estimate_fee() { vm_tester.vm.push_transaction(tx); let result = 
vm_tester.vm.execute(VmExecutionMode::OneTx); - assert!(matches!(result.result, ExecutionResult::Success { .. })); + assert_matches!(result.result, ExecutionResult::Success { .. }); } #[test] @@ -71,11 +73,11 @@ fn simple_execute() { vm.push_transaction(tx2); vm.push_transaction(tx3); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Revert { .. })); + assert_matches!(tx.result, ExecutionResult::Revert { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let block_tip = vm.execute(VmExecutionMode::Batch); - assert!(matches!(block_tip.result, ExecutionResult::Success { .. })); + assert_matches!(block_tip.result, ExecutionResult::Success { .. }); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs index b7c14c54f6df..0fe0b0220fae 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs @@ -4,7 +4,9 @@ use zksync_test_account::Account; use zksync_types::{fee::Fee, Address, Execute, U256}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled}, + interface::{ + TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + }, vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled}, }; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs index 114f80d1a217..08667ccc625f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs @@ -3,7 +3,8 @@ use zksync_types::{ExecuteTransactionCommon, Transaction}; use crate::{ interface::{ CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, + VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + VmRevertReason, }, vm_latest::{tests::tester::vm_tester::VmTester, HistoryEnabled}, }; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs index 9aba2539e001..1fe4232c7780 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs @@ -15,7 +15,7 @@ use crate::{ interface::{ storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}, L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, - VmInterface, + VmInterface, VmInterfaceExt, }, vm_latest::{ constants::BATCH_COMPUTATIONAL_GAS_LIMIT, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs index f4198d541f73..31f6c3291ef6 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs @@ -5,7 +5,7 @@ use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, E use zksync_utils::u256_to_h256; 
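The repeated `assert!(matches!(..))` to `assert_matches!(..)` rewrites above are purely about diagnostics: on failure, `assert_matches!` from the `assert_matches` crate prints the actual value alongside the expected pattern, whereas the old form only reports that a boolean was false. For example:

    use assert_matches::assert_matches;

    #[derive(Debug)]
    enum ExecutionResult {
        Success { output: u32 },
        Revert { reason: String },
    }

    #[test]
    fn matches_with_diagnostics() {
        let result = ExecutionResult::Success { output: 1 };
        // Passes here; had `result` been `Revert { .. }`, the panic message
        // would have printed the whole value, not just "assertion failed".
        assert_matches!(result, ExecutionResult::Success { .. });
    }
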
use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::{ tests::{ tester::{get_empty_storage, VmTesterBuilder}, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index 020b12a7a6e9..7c3ebff4a77d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -15,7 +15,7 @@ use super::utils::{get_complex_upgrade_abi, read_test_contract}; use crate::{ interface::{ storage::WriteStorage, ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceHistoryEnabled, + VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, vm_latest::{ tests::{ diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs index cfa7ba1c7e2c..c5487379ce31 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs @@ -137,3 +137,8 @@ pub(crate) fn read_expensive_contract() -> (Vec, Contract) { "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; (read_bytecode(PATH), load_contract(PATH)) } + +pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { + const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; + (read_bytecode(PATH), load_contract(PATH)) +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs index bad09617b8f0..0a11f5d3f849 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs @@ -6,7 +6,8 @@ use zk_evm_1_5_0::{ }, }; use zksync_system_constants::{ - ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, SHA256_PRECOMPILE_ADDRESS, + ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, + SECP256R1_VERIFY_PRECOMPILE_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; use zksync_types::U256; use zksync_utils::u256_to_h256; @@ -95,7 +96,10 @@ pub(crate) fn get_debug_log( .into_iter() .map(u256_to_h256) .collect(); - let msg = vm_hook_params[0].as_bytes().to_vec(); + let mut msg = vm_hook_params[0].as_bytes().to_vec(); + while msg.last() == Some(&0) { + msg.pop(); + } let data = vm_hook_params[1].as_bytes().to_vec(); let msg = String::from_utf8(msg).expect("Invalid debug message"); @@ -109,10 +113,8 @@ pub(crate) fn get_debug_log( } else { data.to_string() }; - let tx_id = state.vm_local_state.tx_number_in_block; - - format!("Bootloader transaction {}: {} {}", tx_id, msg, data_str) + format!("Bootloader transaction {tx_id}: {msg}: {data_str}") } /// Reads the memory slice represented by the fat pointer. 
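The `get_debug_log` fix above deals with hook parameters being fixed-width 32-byte words: a short ASCII message arrives right-padded with NUL bytes, which previously ended up inside the resulting `String` and garbled the log line. The trim, as a standalone helper:

    /// Strips the zero padding that a fixed-width 32-byte hook parameter adds
    /// to a short ASCII message.
    fn trim_nul_padding(mut msg: Vec<u8>) -> String {
        while msg.last() == Some(&0) {
            msg.pop();
        }
        String::from_utf8(msg).expect("Invalid debug message")
    }

    // trim_nul_padding(b"OK\0\0\0".to_vec()) yields "OK".
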
@@ -167,8 +169,7 @@ pub(crate) fn print_debug_if_needed( VmHook::DebugReturnData => get_debug_returndata(memory, latest_returndata_ptr), _ => return, }; - - tracing::trace!("{}", log); + tracing::trace!("{log}"); } pub(crate) fn computational_gas_price( @@ -187,6 +188,7 @@ pub(crate) fn computational_gas_price( if address == KECCAK256_PRECOMPILE_ADDRESS || address == SHA256_PRECOMPILE_ADDRESS || address == ECRECOVER_PRECOMPILE_ADDRESS + || address == SECP256R1_VERIFY_PRECOMPILE_ADDRESS { data.src1_value.value.low_u32() } else { diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 1c85133e1178..a445a1d51402 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -9,7 +9,7 @@ use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, + BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, @@ -73,41 +73,13 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { - type TracerDispatcher = TracerDispatcher; - - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true); - } - - /// Execute VM with custom tracers. - fn inspect( - &mut self, - tracer: Self::TracerDispatcher, - execution_mode: VmExecutionMode, - ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) - } - - /// Get current state of bootloader memory. - fn get_bootloader_memory(&self) -> BootloaderMemory { - self.bootloader_state.bootloader_memory() - } - - /// Get compressed bytecodes of the last executed transaction - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.bootloader_state.get_last_tx_compressed_bytecodes() - } - - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - self.bootloader_state.start_new_l2_block(l2_block_env); +impl Vm { + pub(super) fn gas_remaining(&self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining } - /// Get current state of virtual machine. - /// This method should be used only after the batch execution. - /// Otherwise it can panic. - fn get_current_execution_state(&self) -> CurrentExecutionState { + // visible for testing + pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) .into_iter() @@ -140,8 +112,28 @@ impl VmInterface for Vm { pubdata_costs: self.state.storage.returned_pubdata_costs.inner().clone(), } } +} + +impl VmInterface for Vm { + type TracerDispatcher = TracerDispatcher; + + /// Push tx into memory for the future execution + fn push_transaction(&mut self, tx: Transaction) { + self.push_transaction_with_compression(tx, true); + } + + /// Execute VM with custom tracers. + fn inspect( + &mut self, + tracer: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode, None) + } - /// Execute transaction with optional bytecode compression. 
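Much of the test-side import churn (`VmInterface` gaining a `VmInterfaceExt` companion) and the `self.execute(..)` to `self.inspect(..)` rewrites follow one pattern: convenience methods move out of the core trait into an extension trait with default implementations. A sketch of that pattern with simplified signatures (the real methods take a `VmExecutionMode` and return execution results):

    pub trait VmInterface {
        type TracerDispatcher: Default;
        fn inspect(&mut self, tracer: Self::TracerDispatcher, one_tx: bool);
    }

    /// `execute` is sugar over `inspect` with a default tracer: implementors
    /// only provide the core method, and callers import the extension trait
    /// to get the shorthand.
    pub trait VmInterfaceExt: VmInterface {
        fn execute(&mut self, one_tx: bool) {
            self.inspect(<Self::TracerDispatcher as Default>::default(), one_tx);
        }
    }

    impl<T: VmInterface> VmInterfaceExt for T {}
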
+ fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.bootloader_state.start_new_l2_block(l2_block_env); + } /// Inspect transaction with optional bytecode compression. fn inspect_transaction_with_bytecode_compression( @@ -149,10 +141,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { @@ -161,7 +150,13 @@ impl VmInterface for Vm { result, ) } else { - (Ok(()), result) + ( + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), + result, + ) } } @@ -169,14 +164,10 @@ impl VmInterface for Vm { self.record_vm_memory_metrics_inner() } - fn gas_remaining(&self) -> u32 { - self.state.local_state.callstack.current.ergs_remaining - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.execute(VmExecutionMode::Batch); + let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.get_bootloader_memory(); + let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, diff --git a/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs b/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs index 7cfa8708fc30..ff3f02ed7161 100644 --- a/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs @@ -148,6 +148,8 @@ impl VmRevertReasonParsingResult { mod tests { use std::convert::TryFrom; + use assert_matches::assert_matches; + use super::VmRevertReason; #[test] @@ -182,7 +184,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; let reason = VmRevertReason::try_from(msg.as_slice()).expect("Shouldn't be error"); - assert!(matches!(reason, VmRevertReason::Unknown { .. })); + assert_matches!(reason, VmRevertReason::Unknown { .. 
}); } #[test] diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 8f232c95b38e..df4baccaf156 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -1,23 +1,14 @@ -use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; -use itertools::Itertools; -use zk_evm_1_3_1::aux_structures::LogQuery; -use zksync_types::{ - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - vm::VmVersion, - Transaction, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{vm::VmVersion, Transaction}; +use zksync_utils::h256_to_u256; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ - storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, - CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, - VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, + SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_m5::{ - events::merge_events, storage::Storage, vm_instance::{MultiVMSubversion, VmInstance}, }, @@ -27,8 +18,6 @@ use crate::{ pub struct Vm { pub(crate) vm: VmInstance, pub(crate) system_env: SystemEnv, - pub(crate) batch_env: L1BatchEnv, - pub(crate) last_tx_compressed_bytecodes: Vec, _phantom: std::marker::PhantomData, } @@ -49,7 +38,7 @@ impl Vm { let inner_vm = crate::vm_m5::vm_with_bootloader::init_vm_with_gas_limit( vm_sub_version, oracle_tools, - batch_env.clone().glue_into(), + batch_env.glue_into(), block_properties, system_env.execution_mode.glue_into(), &system_env.base_system_smart_contracts.clone().glue_into(), @@ -58,8 +47,6 @@ impl Vm { Self { vm: inner_vm, system_env, - batch_env, - last_tx_compressed_bytecodes: vec![], _phantom: Default::default(), } } @@ -97,95 +84,23 @@ impl VmInterface for Vm { } } - fn get_bootloader_memory(&self) -> BootloaderMemory { - vec![] - } - - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.last_tx_compressed_bytecodes.clone() - } - fn start_new_l2_block(&mut self, _l2_block_env: L2BlockEnv) { // Do nothing, because vm 1.3.2 doesn't support L2 blocks } - fn get_current_execution_state(&self) -> CurrentExecutionState { - let (raw_events, l1_messages) = self.vm.state.event_sink.flatten(); - let events = merge_events(raw_events) - .into_iter() - .map(|e| e.into_vm_event(self.batch_env.number)) - .collect(); - let l2_to_l1_logs = l1_messages - .into_iter() - .map(|m| { - UserL2ToL1Log(L2ToL1Log { - shard_id: m.shard_id, - is_service: m.is_first, - tx_number_in_block: m.tx_number_in_block, - sender: m.address, - key: u256_to_h256(m.key), - value: u256_to_h256(m.value), - }) - }) - .collect(); - - let used_contract_hashes = self - .vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .keys() - .cloned() - .collect(); - - let storage_log_queries = self.vm.get_final_log_queries(); - - // To allow calling the `vm-1.3.3`s. method, the `v1.3.1`'s `LogQuery` has to be converted - // to the `vm-1.3.3`'s `LogQuery`. Then, we need to convert it back. 
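The vm_m5/vm_m6 hunks below thread a `gas_remaining` field through `VmPartialExecutionResult`, snapshotting the current callframe's gas at the moment each result is assembled so that callers can compute gas used as a difference of two snapshots. A sketch of that arithmetic (struct trimmed to the relevant field):

    pub struct VmPartialExecutionResult {
        pub gas_remaining: u32,
        // ...other statistics elided
    }

    /// Gas spent between two results is the drop in `gas_remaining`;
    /// `saturating_sub` guards against a later snapshot ending up higher
    /// (e.g. after a refund).
    pub fn gas_used_between(
        before: &VmPartialExecutionResult,
        after: &VmPartialExecutionResult,
    ) -> u32 {
        before.gas_remaining.saturating_sub(after.gas_remaining)
    }
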
- let deduplicated_logs: Vec = sort_storage_access_queries( - &storage_log_queries - .iter() - .map(|log| { - GlueInto::::glue_into(log.log_query) - }) - .collect_vec(), - ) - .1 - .into_iter() - .map(GlueInto::::glue_into) - .collect(); - - CurrentExecutionState { - events, - deduplicated_storage_logs: deduplicated_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), - used_contract_hashes, - system_logs: vec![], - user_l2_to_l1_logs: l2_to_l1_logs, - // Fields below are not produced by `vm5` - storage_refunds: vec![], - pubdata_costs: vec![], - } - } - fn inspect_transaction_with_bytecode_compression( &mut self, _tracer: Self::TracerDispatcher, tx: Transaction, _with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { crate::vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, self.system_env.execution_mode.glue_into(), ); - (Ok(()), self.execute(VmExecutionMode::OneTx)) + // Bytecode compression isn't supported + (Ok(vec![].into()), self.inspect((), VmExecutionMode::OneTx)) } fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { @@ -201,10 +116,6 @@ impl VmInterface for Vm { } } - fn gas_remaining(&self) -> u32 { - self.vm.gas_remaining() - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index f0a94d0c3b6e..4a96c4a750cc 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -157,6 +157,7 @@ pub struct VmPartialExecutionResult { pub revert_reason: Option, pub contracts_used: usize, pub cycles_used: u32, + pub gas_remaining: u32, } #[derive(Debug, Clone, PartialEq)] @@ -682,6 +683,7 @@ impl VmInstance { .get_decommitted_bytes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + gas_remaining: self.gas_remaining(), }, }) } else { @@ -743,6 +745,7 @@ impl VmInstance { .decommittment_processor .get_decommitted_bytes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + gas_remaining: self.gas_remaining(), }; // Collecting `block_tip_result` needs logs with timestamp, so we drain events for the `full_result` @@ -799,6 +802,7 @@ impl VmInstance { .decommittment_processor .get_decommitted_bytes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, + gas_remaining: self.gas_remaining(), } } diff --git a/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs b/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs index 0e5bf9fd8346..cc1a1aa2c653 100644 --- a/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs @@ -167,6 +167,8 @@ impl VmRevertReasonParsingResult { mod tests { use std::convert::TryFrom; + use assert_matches::assert_matches; + use super::VmRevertReason; #[test] @@ -202,7 +204,7 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; let reason = VmRevertReason::try_from(msg.as_slice()).expect("Shouldn't be error"); - assert!(matches!(reason, VmRevertReason::Unknown { .. })); + assert_matches!(reason, VmRevertReason::Unknown { .. 
}); } #[test] diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index b59561319f56..7e19076a5202 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -1,34 +1,25 @@ use std::collections::HashSet; -use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; -use itertools::Itertools; -use zk_evm_1_3_1::aux_structures::LogQuery; -use zksync_types::{ - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - vm::VmVersion, - Transaction, -}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; +use zksync_types::{vm::VmVersion, Transaction}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ - storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, - CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, - VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + storage::StoragePtr, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, + L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::old::TracerDispatcher, utils::bytecode, - vm_m6::{events::merge_events, storage::Storage, vm_instance::MultiVMSubversion, VmInstance}, + vm_m6::{storage::Storage, vm_instance::MultiVMSubversion, VmInstance}, }; #[derive(Debug)] pub struct Vm { pub(crate) vm: VmInstance, pub(crate) system_env: SystemEnv, - pub(crate) batch_env: L1BatchEnv, - pub(crate) last_tx_compressed_bytecodes: Vec, } impl Vm { @@ -48,7 +39,7 @@ impl Vm { let inner_vm = crate::vm_m6::vm_with_bootloader::init_vm_with_gas_limit( vm_sub_version, oracle_tools, - batch_env.clone().glue_into(), + batch_env.glue_into(), block_properties, system_env.execution_mode.glue_into(), &system_env.base_system_smart_contracts.clone().glue_into(), @@ -57,8 +48,6 @@ impl Vm { Self { vm: inner_vm, system_env, - batch_env, - last_tx_compressed_bytecodes: vec![], } } } @@ -111,96 +100,23 @@ impl VmInterface for Vm { } } - fn get_bootloader_memory(&self) -> BootloaderMemory { - vec![] - } - - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.last_tx_compressed_bytecodes.clone() - } - fn start_new_l2_block(&mut self, _l2_block_env: L2BlockEnv) { // Do nothing, because vm 1.3.2 doesn't support L2 blocks } - fn get_current_execution_state(&self) -> CurrentExecutionState { - let (raw_events, l1_messages) = self.vm.state.event_sink.flatten(); - let events = merge_events(raw_events) - .into_iter() - .map(|e| e.into_vm_event(self.batch_env.number)) - .collect(); - let l2_to_l1_logs = l1_messages - .into_iter() - .map(|m| { - UserL2ToL1Log(L2ToL1Log { - shard_id: m.shard_id, - is_service: m.is_first, - tx_number_in_block: m.tx_number_in_block, - sender: m.address, - key: u256_to_h256(m.key), - value: u256_to_h256(m.value), - }) - }) - .collect(); - - let used_contract_hashes = self - .vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .keys() - .cloned() - .collect(); - - let storage_log_queries = self.vm.get_final_log_queries(); - - // To allow calling the `vm-1.3.3`s. method, the `v1.3.1`'s `LogQuery` has to be converted - // to the `vm-1.3.3`'s `LogQuery`. Then, we need to convert it back. 
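The comment in the removed block describes a recurring multivm pattern: a helper from one crate version (here `sort_storage_access_queries`, which only accepts its own `LogQuery` type) forces a convert-process-convert-back round trip. Generically, with the converter pair standing in for the `GlueInto` impls:

    /// Runs `process`, which only understands representation `U`, over data
    /// held in representation `T` by converting each item there and back.
    pub fn process_in_foreign_repr<T, U>(
        items: Vec<T>,
        into: impl Fn(T) -> U,
        back: impl Fn(U) -> T,
        process: impl Fn(Vec<U>) -> Vec<U>,
    ) -> Vec<T> {
        let converted: Vec<U> = items.into_iter().map(into).collect();
        process(converted).into_iter().map(back).collect()
    }
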
- let deduplicated_logs: Vec = sort_storage_access_queries( - &storage_log_queries - .iter() - .map(|log| { - GlueInto::::glue_into(log.log_query) - }) - .collect_vec(), - ) - .1 - .into_iter() - .map(GlueInto::::glue_into) - .collect(); - - CurrentExecutionState { - events, - deduplicated_storage_logs: deduplicated_logs - .into_iter() - .map(GlueInto::glue_into) - .collect(), - used_contract_hashes, - user_l2_to_l1_logs: l2_to_l1_logs, - // Fields below are not produced by `vm6` - system_logs: vec![], - storage_refunds: vec![], - pubdata_costs: vec![], - } - } - fn inspect_transaction_with_bytecode_compression( &mut self, tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { if let Some(storage_invocations) = tracer.storage_invocations { self.vm .execution_mode .set_invocation_limit(storage_invocations); } - self.last_tx_compressed_bytecodes = vec![]; + let compressed_bytecodes: Vec<_>; let bytecodes = if with_compression { let deps = &tx.execute.factory_deps; let mut deps_hashes = HashSet::with_capacity(deps.len()); @@ -217,18 +133,17 @@ impl VmInterface for Vm { bytecode::compress(bytecode.clone()).ok() } }); - let compressed_bytecodes: Vec<_> = filtered_deps.collect(); + compressed_bytecodes = filtered_deps.collect(); - self.last_tx_compressed_bytecodes - .clone_from(&compressed_bytecodes); crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, self.system_env.execution_mode.glue_into(), - Some(compressed_bytecodes), + Some(compressed_bytecodes.clone()), ); bytecode_hashes } else { + compressed_bytecodes = vec![]; crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, @@ -267,7 +182,7 @@ impl VmInterface for Vm { result, ) } else { - (Ok(()), result) + (Ok(compressed_bytecodes.into()), result) } } @@ -288,10 +203,6 @@ impl VmInterface for Vm { } } - fn gas_remaining(&self) -> u32 { - self.vm.gas_remaining() - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs index bc60530b6f55..d6c418da4c20 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs @@ -159,6 +159,7 @@ pub struct VmPartialExecutionResult { pub contracts_used: usize, pub cycles_used: u32, pub computational_gas_used: u32, + pub gas_remaining: u32, } #[derive(Debug, Clone, PartialEq)] @@ -673,6 +674,7 @@ impl VmInstance { cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, computational_gas_used, + gas_remaining: self.gas_remaining(), }, call_traces: tx_tracer.call_traces(), }) @@ -775,6 +777,7 @@ impl VmInstance { .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, computational_gas_used, + gas_remaining: self.gas_remaining(), }; // Collecting `block_tip_result` needs logs with timestamp, so we drain events for the `full_result` @@ -823,6 +826,7 @@ impl VmInstance { contracts_used: 0, cycles_used: 0, computational_gas_used: 0, + gas_remaining: 0, }, } } else { @@ -876,6 +880,7 @@ impl VmInstance { .get_decommitted_bytecodes_after_timestamp(timestamp_initial), cycles_used: self.state.local_state.monotonic_cycle_counter - cycles_initial, computational_gas_used, + 
gas_remaining: self.gas_remaining(), } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs index 12aab3c7364c..b428851c9383 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs @@ -167,11 +167,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec<CompressedBytecodeInfo> { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs index 2289cca7a47c..f7ab9ae8b517 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs @@ -5,7 +5,7 @@ use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - CompressedBytecodeInfo, VmInterface, + CompressedBytecodeInfo, }, utils::bytecode, vm_refunds_enhancement::Vm, @@ -15,15 +15,18 @@ use crate::{ impl Vm { /// Checks whether the last transaction has successfully published its compressed bytecodes; returns `true` if at least one of them is still unknown. pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { - self.get_last_tx_compressed_bytecodes().iter().any(|info| { - !self - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) - }) + self.bootloader_state + .get_last_tx_compressed_bytecodes() + .iter() + .any(|info| { + !self + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(&info.original)) + }) } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs index 3f6dd7e0e9e5..cadd183735e6 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs @@ -4,7 +4,7 @@ use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, VmExecutionStopReason}, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmExecutionMode, VmExecutionResultAndLogs, }, vm_refunds_enhancement::{ old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/gas.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/gas.rs index 0f4b5c6b6b0e..d957697a0681 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/gas.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/gas.rs @@ -1,5 +1,5 @@ use crate::{ - interface::{storage::WriteStorage, VmInterface}, + interface::storage::WriteStorage, vm_refunds_enhancement::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs index 
bfa439106eaa..23b250d485b7 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs @@ -45,10 +45,10 @@ fn test_bootloader_out_of_gas() { let res = vm.vm.execute(VmExecutionMode::Batch); - assert!(matches!( + assert_matches!( res.result, ExecutionResult::Halt { reason: Halt::BootloaderOutOfGas } - )); + ); } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs index f85c2144de1d..eb5e38798379 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs @@ -24,7 +24,7 @@ fn estimate_fee() { vm_tester.vm.push_transaction(tx); let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert!(matches!(result.result, ExecutionResult::Success { .. })); + assert_matches!(result.result, ExecutionResult::Success { .. }); } #[test] @@ -67,11 +67,11 @@ fn simple_execute() { vm.push_transaction(tx2); vm.push_transaction(tx3); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Revert { .. })); + assert_matches!(tx.result, ExecutionResult::Revert { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let block_tip = vm.execute(VmExecutionMode::Batch); - assert!(matches!(block_tip.result, ExecutionResult::Success { .. })); + assert_matches!(block_tip.result, ExecutionResult::Success { .. }); } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 821a8144249e..119abf052b9f 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -5,9 +5,10 @@ use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_refunds_enhancement::{ @@ -34,40 +35,11 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { - type TracerDispatcher = TracerDispatcher; - - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) - } - - /// Execute VM with custom tracers. - fn inspect( - &mut self, - dispatcher: Self::TracerDispatcher, - execution_mode: VmExecutionMode, - ) -> VmExecutionResultAndLogs { - self.inspect_inner(dispatcher, execution_mode) - } - - /// Get current state of bootloader memory. 
- fn get_bootloader_memory(&self) -> BootloaderMemory { - self.bootloader_state.bootloader_memory() - } - - /// Get compressed bytecodes of the last executed transaction - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.bootloader_state.get_last_tx_compressed_bytecodes() - } - - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - self.bootloader_state.start_new_l2_block(l2_block_env); +impl Vm { + pub(super) fn gas_remaining(&self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining } - /// Get current state of virtual machine. - /// This method should be used only after the batch execution. - /// Otherwise it can panic. fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) @@ -98,6 +70,28 @@ impl VmInterface for Vm { pubdata_costs: Vec::new(), } } +} + +impl VmInterface for Vm { + type TracerDispatcher = TracerDispatcher; + + /// Push tx into memory for the future execution + fn push_transaction(&mut self, tx: Transaction) { + self.push_transaction_with_compression(tx, true) + } + + /// Execute VM with custom tracers. + fn inspect( + &mut self, + dispatcher: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(dispatcher, execution_mode) + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.bootloader_state.start_new_l2_block(l2_block_env); + } /// Inspect transaction with optional bytecode compression. fn inspect_transaction_with_bytecode_compression( @@ -105,10 +99,7 @@ impl VmInterface for Vm { dispatcher: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect(dispatcher, VmExecutionMode::OneTx); if self.has_unpublished_bytecodes() { @@ -117,17 +108,32 @@ impl VmInterface for Vm { result, ) } else { - (Ok(()), result) + ( + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), + result, + ) } } - fn gas_remaining(&self) -> u32 { - self.state.local_state.callstack.current.ergs_remaining - } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { self.record_vm_memory_metrics_inner() } + + fn finish_batch(&mut self) -> FinishedL1Batch { + let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); + let execution_state = self.get_current_execution_state(); + let bootloader_memory = self.bootloader_state.bootloader_memory(); + FinishedL1Batch { + block_tip_execution_result: result, + final_execution_state: execution_state, + final_bootloader_memory: Some(bootloader_memory), + pubdata_input: None, + state_diffs: None, + } + } } impl VmFactory for Vm { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs index 562d74513710..7e9af0ed6b82 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs @@ -167,11 +167,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = 
self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs index 96a30d508054..d5f2b50b83fc 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs @@ -5,7 +5,7 @@ use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - CompressedBytecodeInfo, VmInterface, + CompressedBytecodeInfo, }, utils::bytecode, vm_virtual_blocks::Vm, @@ -15,15 +15,18 @@ use crate::{ impl Vm { /// Checks whether the last transaction has successfully published compressed bytecodes and returns `true` if at least one of them is still unknown. pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { - self.get_last_tx_compressed_bytecodes().iter().any(|info| { - !self - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) - }) + self.bootloader_state + .get_last_tx_compressed_bytecodes() + .iter() + .any(|info| { + !self + .state + .storage + .storage + .get_ptr() + .borrow_mut() + .is_bytecode_known(&hash_bytecode(&info.original)) + }) } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs index aafcca3821be..42709c345ea6 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs @@ -4,7 +4,7 @@ use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStopReason, VmExecutionStopReason}, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmExecutionMode, VmExecutionResultAndLogs, }, vm_virtual_blocks::{ old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/gas.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/gas.rs index 28f0ec6df4a9..3b7af470f2cd 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/gas.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/gas.rs @@ -1,5 +1,5 @@ use crate::{ - interface::{storage::WriteStorage, VmInterface}, + interface::storage::WriteStorage, vm_virtual_blocks::{tracers::DefaultExecutionTracer, vm::Vm}, HistoryMode, }; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs index 5abbd1dde47f..a30b5a58f638 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs @@ -44,10 +44,10 @@ fn test_bootloader_out_of_gas() { let res = vm.vm.execute(VmExecutionMode::Batch); - assert!(matches!( + assert_matches!( res.result, ExecutionResult::Halt { reason: Halt::BootloaderOutOfGas } - )); + ); } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs index 6b2237f5e59d..c4eac73499fc 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs @@ -24,7 
+24,7 @@ fn estimate_fee() { vm_tester.vm.push_transaction(tx); let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert!(matches!(result.result, ExecutionResult::Success { .. })); + assert_matches!(result.result, ExecutionResult::Success { .. }); } #[test] @@ -67,11 +67,11 @@ fn simple_execute() { vm.push_transaction(tx2); vm.push_transaction(tx3); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Revert { .. })); + assert_matches!(tx.result, ExecutionResult::Revert { .. }); let tx = vm.execute(VmExecutionMode::OneTx); - assert!(matches!(tx.result, ExecutionResult::Success { .. })); + assert_matches!(tx.result, ExecutionResult::Success { .. }); let block_tip = vm.execute(VmExecutionMode::Batch); - assert!(matches!(block_tip.result, ExecutionResult::Success { .. })); + assert_matches!(block_tip.result, ExecutionResult::Success { .. }); } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 8991ee1b4b9f..0ecdd6797f4b 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -5,9 +5,10 @@ use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, - BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, - L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, - VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_virtual_blocks::{ @@ -34,40 +35,11 @@ pub struct Vm { _phantom: std::marker::PhantomData, } -impl VmInterface for Vm { - type TracerDispatcher = TracerDispatcher; - - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) - } - - /// Execute VM with custom tracers. - fn inspect( - &mut self, - tracer: TracerDispatcher, - execution_mode: VmExecutionMode, - ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) - } - - /// Get current state of bootloader memory. - fn get_bootloader_memory(&self) -> BootloaderMemory { - self.bootloader_state.bootloader_memory() - } - - /// Get compressed bytecodes of the last executed transaction - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - self.bootloader_state.get_last_tx_compressed_bytecodes() - } - - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - self.bootloader_state.start_new_l2_block(l2_block_env); +impl Vm { + pub(super) fn gas_remaining(&self) -> u32 { + self.state.local_state.callstack.current.ergs_remaining } - /// Get current state of virtual machine. - /// This method should be used only after the batch execution. - /// Otherwise it can panic. 
fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); let events: Vec<_> = merge_events(raw_events) @@ -98,6 +70,28 @@ impl VmInterface for Vm { pubdata_costs: Vec::new(), } } +} + +impl VmInterface for Vm { + type TracerDispatcher = TracerDispatcher; + + /// Push tx into memory for the future execution + fn push_transaction(&mut self, tx: Transaction) { + self.push_transaction_with_compression(tx, true) + } + + /// Execute VM with custom tracers. + fn inspect( + &mut self, + tracer: TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode) + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.bootloader_state.start_new_l2_block(l2_block_env); + } /// Inspect transaction with optional bytecode compression. fn inspect_transaction_with_bytecode_compression( @@ -105,10 +99,7 @@ impl VmInterface for Vm { tracer: TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx); if self.has_unpublished_bytecodes() { @@ -117,17 +108,32 @@ impl VmInterface for Vm { result, ) } else { - (Ok(()), result) + ( + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), + result, + ) } } - fn gas_remaining(&self) -> u32 { - self.state.local_state.callstack.current.ergs_remaining - } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { self.record_vm_memory_metrics_inner() } + + fn finish_batch(&mut self) -> FinishedL1Batch { + let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); + let execution_state = self.get_current_execution_state(); + let bootloader_memory = self.bootloader_state.bootloader_memory(); + FinishedL1Batch { + block_tip_execution_result: result, + final_execution_state: execution_state, + final_bootloader_memory: Some(bootloader_memory), + pubdata_input: None, + state_diffs: None, + } + } } impl VmFactory for Vm { diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 0e4cefd3c808..cedb4bc8276d 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -4,10 +4,9 @@ use crate::{ glue::history_mode::HistoryMode, interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, - BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, + VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::TracerDispatcher, versions::shadow::ShadowVm, @@ -56,12 +55,6 @@ impl VmInterface for VmInstance { dispatch_vm!(self.push_transaction(tx)) } - /// Execute the batch without stops after each tx. - /// This method allows to execute the part of the VM cycle after executing all txs. 
- fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { - dispatch_vm!(self.execute(execution_mode)) - } - /// Execute next transaction with custom tracers fn inspect( &mut self, @@ -71,45 +64,17 @@ impl VmInterface for VmInstance { dispatch_vm!(self.inspect(dispatcher.into(), execution_mode)) } - fn get_bootloader_memory(&self) -> BootloaderMemory { - dispatch_vm!(self.get_bootloader_memory()) - } - - /// Get compressed bytecodes of the last executed transaction - fn get_last_tx_compressed_bytecodes(&self) -> Vec { - dispatch_vm!(self.get_last_tx_compressed_bytecodes()) - } - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { dispatch_vm!(self.start_new_l2_block(l2_block_env)) } - fn get_current_execution_state(&self) -> CurrentExecutionState { - dispatch_vm!(self.get_current_execution_state()) - } - - /// Execute transaction with optional bytecode compression. - fn execute_transaction_with_bytecode_compression( - &mut self, - tx: zksync_types::Transaction, - with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { - dispatch_vm!(self.execute_transaction_with_bytecode_compression(tx, with_compression)) - } - /// Inspect transaction with optional bytecode compression. fn inspect_transaction_with_bytecode_compression( &mut self, dispatcher: Self::TracerDispatcher, tx: zksync_types::Transaction, with_compression: bool, - ) -> ( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { dispatch_vm!(self.inspect_transaction_with_bytecode_compression( dispatcher.into(), tx, @@ -121,10 +86,6 @@ impl VmInterface for VmInstance { dispatch_vm!(self.record_vm_memory_metrics()) } - fn gas_remaining(&self) -> u32 { - dispatch_vm!(self.gas_remaining()) - } - /// Returns the results of the execution of the whole batch fn finish_batch(&mut self) -> FinishedL1Batch { dispatch_vm!(self.finish_batch()) diff --git a/core/lib/protobuf_config/src/base_token_adjuster.rs b/core/lib/protobuf_config/src/base_token_adjuster.rs index 951feac16533..93c2fcea55bc 100644 --- a/core/lib/protobuf_config/src/base_token_adjuster.rs +++ b/core/lib/protobuf_config/src/base_token_adjuster.rs @@ -42,6 +42,9 @@ impl ProtoRepr for proto::BaseTokenAdjuster { l1_tx_sending_sleep_ms: self .l1_tx_sending_sleep_ms .unwrap_or(Self::Type::default_l1_tx_sending_sleep_ms()), + l1_update_deviation_percentage: self + .l1_update_deviation_percentage + .unwrap_or(Self::Type::default_l1_update_deviation_percentage()), }) } @@ -53,6 +56,7 @@ impl ProtoRepr for proto::BaseTokenAdjuster { l1_receipt_checking_max_attempts: Some(this.l1_receipt_checking_max_attempts), l1_tx_sending_max_attempts: Some(this.l1_tx_sending_max_attempts), l1_tx_sending_sleep_ms: Some(this.l1_tx_sending_sleep_ms), + l1_update_deviation_percentage: Some(this.l1_update_deviation_percentage), price_fetching_max_attempts: Some(this.price_fetching_max_attempts), price_fetching_sleep_ms: Some(this.price_fetching_sleep_ms), max_tx_gas: Some(this.max_tx_gas), diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index b57f033d0d22..f5eb5c5b2f10 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -6,7 +6,7 @@ use zksync_config::configs::consensus::{ }; use zksync_protobuf::{kB, read_optional, repr::ProtoRepr, required, ProtoFmt}; -use crate::{proto::consensus as proto, read_optional_repr}; +use crate::{parse_h160, 
proto::consensus as proto, read_optional_repr}; impl ProtoRepr for proto::WeightedValidator { type Type = WeightedValidator; @@ -65,6 +65,12 @@ impl ProtoRepr for proto::GenesisSpec { .collect::>() .context("attesters")?, leader: ValidatorPublicKey(required(&self.leader).context("leader")?.clone()), + registry_address: self + .registry_address + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("registry_address")?, }) } fn build(this: &Self::Type) -> Self { @@ -74,6 +80,7 @@ impl ProtoRepr for proto::GenesisSpec { validators: this.validators.iter().map(ProtoRepr::build).collect(), attesters: this.attesters.iter().map(ProtoRepr::build).collect(), leader: Some(this.leader.0.clone()), + registry_address: this.registry_address.map(|a| format!("{:?}", a)), } } } diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs new file mode 100644 index 000000000000..2009d32db17c --- /dev/null +++ b/core/lib/protobuf_config/src/da_client.rs @@ -0,0 +1,61 @@ +use anyhow::Context; +use zksync_config::{ + configs::{ + da_client::DAClient::{Avail, ObjectStore}, + {self}, + }, + AvailConfig, +}; +use zksync_protobuf::{required, ProtoRepr}; + +use crate::proto::{da_client as proto, object_store as object_store_proto}; + +impl ProtoRepr for proto::DataAvailabilityClient { + type Type = configs::DAClientConfig; + + fn read(&self) -> anyhow::Result { + let config = required(&self.config).context("config")?; + + let client = match config { + proto::data_availability_client::Config::Avail(conf) => Avail(AvailConfig { + api_node_url: required(&conf.api_node_url) + .context("api_node_url")? + .clone(), + bridge_api_url: required(&conf.bridge_api_url) + .context("bridge_api_url")? + .clone(), + seed: required(&conf.seed).context("seed")?.clone(), + app_id: *required(&conf.app_id).context("app_id")?, + timeout: *required(&conf.timeout).context("timeout")? as usize, + max_retries: *required(&conf.max_retries).context("max_retries")? as usize, + }), + proto::data_availability_client::Config::ObjectStore(conf) => { + ObjectStore(object_store_proto::ObjectStore::read(conf)?) 
+ } + }; + + Ok(configs::DAClientConfig { client }) + } + + fn build(this: &Self::Type) -> Self { + match &this.client { + Avail(config) => Self { + config: Some(proto::data_availability_client::Config::Avail( + proto::AvailConfig { + api_node_url: Some(config.api_node_url.clone()), + bridge_api_url: Some(config.bridge_api_url.clone()), + seed: Some(config.seed.clone()), + app_id: Some(config.app_id), + timeout: Some(config.timeout as u64), + max_retries: Some(config.max_retries as u64), + }, + )), + }, + ObjectStore(config) => Self { + config: Some(proto::data_availability_client::Config::ObjectStore( + object_store_proto::ObjectStore::build(config), + )), + }, + } + } +} diff --git a/core/lib/protobuf_config/src/da_dispatcher.rs b/core/lib/protobuf_config/src/da_dispatcher.rs index 1cafa37a1e19..d77073bd32cf 100644 --- a/core/lib/protobuf_config/src/da_dispatcher.rs +++ b/core/lib/protobuf_config/src/da_dispatcher.rs @@ -11,6 +11,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { polling_interval_ms: self.polling_interval_ms, max_rows_to_dispatch: self.max_rows_to_dispatch, max_retries: self.max_retries.map(|x| x as u16), + use_dummy_inclusion_data: self.use_dummy_inclusion_data, }) } @@ -19,6 +20,7 @@ impl ProtoRepr for proto::DataAvailabilityDispatcher { polling_interval_ms: this.polling_interval_ms, max_rows_to_dispatch: this.max_rows_to_dispatch, max_retries: this.max_retries.map(Into::into), + use_dummy_inclusion_data: this.use_dummy_inclusion_data, } } } diff --git a/core/lib/protobuf_config/src/experimental.rs b/core/lib/protobuf_config/src/experimental.rs index 7b71dec80344..63fa0ca51eb5 100644 --- a/core/lib/protobuf_config/src/experimental.rs +++ b/core/lib/protobuf_config/src/experimental.rs @@ -80,10 +80,7 @@ impl ProtoRepr for proto::VmPlayground { .transpose() .context("fast_vm_mode")? 
.map_or_else(FastVmMode::default, |mode| mode.parse()), - db_path: self - .db_path - .clone() - .unwrap_or_else(Self::Type::default_db_path), + db_path: self.db_path.clone(), first_processed_batch: L1BatchNumber(self.first_processed_batch.unwrap_or(0)), window_size: NonZeroU32::new(self.window_size.unwrap_or(1)) .context("window_size cannot be 0")?, @@ -94,7 +91,7 @@ fn build(this: &Self::Type) -> Self { Self { fast_vm_mode: Some(proto::FastVmMode::new(this.fast_vm_mode).into()), - db_path: Some(this.db_path.clone()), + db_path: this.db_path.clone(), first_processed_batch: Some(this.first_processed_batch.0), window_size: Some(this.window_size.get()), reset: Some(this.reset), diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index 87bca88db387..b73539a0897f 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -28,6 +28,7 @@ impl ProtoRepr for proto::GeneralConfig { eth: read_optional_repr(&self.eth), snapshot_creator: read_optional_repr(&self.snapshot_creator), observability: read_optional_repr(&self.observability), + da_client_config: read_optional_repr(&self.da_client), da_dispatcher_config: read_optional_repr(&self.da_dispatcher), protective_reads_writer_config: read_optional_repr(&self.protective_reads_writer), basic_witness_input_producer_config: read_optional_repr( @@ -76,6 +77,7 @@ impl ProtoRepr for proto::GeneralConfig { eth: this.eth.as_ref().map(ProtoRepr::build), snapshot_creator: this.snapshot_creator.as_ref().map(ProtoRepr::build), observability: this.observability.as_ref().map(ProtoRepr::build), + da_client: this.da_client_config.as_ref().map(ProtoRepr::build), da_dispatcher: this.da_dispatcher_config.as_ref().map(ProtoRepr::build), protective_reads_writer: this .protective_reads_writer_config diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 92f639aa224e..59896aa244d8 100644 --- a/core/lib/protobuf_config/src/genesis.rs +++ b/core/lib/protobuf_config/src/genesis.rs @@ -43,6 +43,13 @@ impl ProtoRepr for proto::Genesis { 0.into(), ) }; + // Check either of the fields, using the old name as a fallback. + let snark_wrapper_vk_hash = match (&prover.snark_wrapper_vk_hash, &prover.recursion_scheduler_level_vk_hash) { + (Some(x), _) => parse_h256(x).context("snark_wrapper_vk_hash")?, + (_, Some(x)) => parse_h256(x).context("recursion_scheduler_level_vk_hash")?, + _ => anyhow::bail!("Either snark_wrapper_vk_hash or recursion_scheduler_level_vk_hash should be present"), + }; + Ok(Self::Type { protocol_version: Some(protocol_version), genesis_root_hash: Some( @@ -75,9 +82,7 @@ l2_chain_id: required(&self.l2_chain_id) .and_then(|x| L2ChainId::try_from(*x).map_err(|a| anyhow::anyhow!(a))) .context("l2_chain_id")?, - recursion_scheduler_level_vk_hash: required(&prover.recursion_scheduler_level_vk_hash) - .and_then(|x| parse_h256(x)) - .context("recursion_scheduler_level_vk_hash")?, + snark_wrapper_vk_hash, fee_account: required(&self.fee_account) .and_then(|x| parse_h160(x)) .context("fee_account")?, @@ -104,11 +109,9 @@ l1_chain_id: Some(this.l1_chain_id.0), l2_chain_id: Some(this.l2_chain_id.as_u64()), prover: Some(proto::Prover { - recursion_scheduler_level_vk_hash: Some(format!( - "{:?}", - this.recursion_scheduler_level_vk_hash - )), + recursion_scheduler_level_vk_hash: None, // Deprecated field. 
dummy_verifier: Some(this.dummy_verifier), + snark_wrapper_vk_hash: Some(format!("{:?}", this.snark_wrapper_vk_hash)), }), l1_batch_commit_data_generator_mode: Some( proto::L1BatchCommitDataGeneratorMode::new( diff --git a/core/lib/protobuf_config/src/house_keeper.rs b/core/lib/protobuf_config/src/house_keeper.rs index b6871de853fb..e40fd1b37dc7 100644 --- a/core/lib/protobuf_config/src/house_keeper.rs +++ b/core/lib/protobuf_config/src/house_keeper.rs @@ -12,43 +12,6 @@ impl ProtoRepr for proto::HouseKeeper { &self.l1_batch_metrics_reporting_interval_ms, ) .context("l1_batch_metrics_reporting_interval_ms")?, - gpu_prover_queue_reporting_interval_ms: *required( - &self.gpu_prover_queue_reporting_interval_ms, - ) - .context("gpu_prover_queue_reporting_interval_ms")?, - prover_job_retrying_interval_ms: *required(&self.prover_job_retrying_interval_ms) - .context("prover_job_retrying_interval_ms")?, - prover_stats_reporting_interval_ms: *required(&self.prover_stats_reporting_interval_ms) - .context("prover_stats_reporting_interval_ms")?, - witness_job_moving_interval_ms: *required(&self.witness_job_moving_interval_ms) - .context("witness_job_moving_interval_ms")?, - witness_generator_stats_reporting_interval_ms: *required( - &self.witness_generator_stats_reporting_interval_ms, - ) - .context("witness_generator_stats_reporting_interval_ms")?, - prover_db_pool_size: *required(&self.prover_db_pool_size) - .context("prover_db_pool_size")?, - proof_compressor_job_retrying_interval_ms: *required( - &self.proof_compressor_job_retrying_interval_ms, - ) - .context("proof_compressor_job_retrying_interval_ms")?, - witness_generator_job_retrying_interval_ms: *required( - &self.witness_generator_job_retrying_interval_ms, - ) - .context("witness_generator_job_retrying_interval_ms")?, - proof_compressor_stats_reporting_interval_ms: *required( - &self.proof_compressor_stats_reporting_interval_ms, - ) - .context("proof_compressor_stats_reporting_interval_ms")?, - - // TODO(PLA-862): Make these 2 variables required - prover_job_archiver_archiving_interval_ms: self - .prover_job_archiver_archiving_interval_ms, - prover_job_archiver_archive_after_secs: self.prover_job_archiver_archive_after_secs, - fri_gpu_prover_archiver_archiving_interval_ms: self - .fri_gpu_prover_archiver_archiving_interval_ms, - fri_gpu_prover_archiver_archive_after_secs: self - .fri_gpu_prover_archiver_archive_after_secs, }) } @@ -57,32 +20,6 @@ impl ProtoRepr for proto::HouseKeeper { l1_batch_metrics_reporting_interval_ms: Some( this.l1_batch_metrics_reporting_interval_ms, ), - gpu_prover_queue_reporting_interval_ms: Some( - this.gpu_prover_queue_reporting_interval_ms, - ), - prover_job_retrying_interval_ms: Some(this.prover_job_retrying_interval_ms), - prover_stats_reporting_interval_ms: Some(this.prover_stats_reporting_interval_ms), - witness_job_moving_interval_ms: Some(this.witness_job_moving_interval_ms), - witness_generator_stats_reporting_interval_ms: Some( - this.witness_generator_stats_reporting_interval_ms, - ), - witness_generator_job_retrying_interval_ms: Some( - this.witness_generator_job_retrying_interval_ms, - ), - prover_db_pool_size: Some(this.prover_db_pool_size), - proof_compressor_job_retrying_interval_ms: Some( - this.proof_compressor_job_retrying_interval_ms, - ), - proof_compressor_stats_reporting_interval_ms: Some( - this.proof_compressor_stats_reporting_interval_ms, - ), - prover_job_archiver_archiving_interval_ms: this - .prover_job_archiver_archiving_interval_ms, - prover_job_archiver_archive_after_secs: 
this.prover_job_archiver_archive_after_secs, - fri_gpu_prover_archiver_archiving_interval_ms: this - .fri_gpu_prover_archiver_archiving_interval_ms, - fri_gpu_prover_archiver_archive_after_secs: this - .fri_gpu_prover_archiver_archive_after_secs, } } } diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index f4d0188ea20f..a4822edbe8e4 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -29,6 +29,7 @@ mod pruning; mod secrets; mod snapshots_creator; +mod da_client; mod external_price_api_client; mod external_proof_integration_api; mod prover_job_monitor; diff --git a/core/lib/protobuf_config/src/observability.rs b/core/lib/protobuf_config/src/observability.rs index dcf87771b587..9a6c31f9223c 100644 --- a/core/lib/protobuf_config/src/observability.rs +++ b/core/lib/protobuf_config/src/observability.rs @@ -30,11 +30,7 @@ impl ProtoRepr for proto::Observability { sentry_url, sentry_environment, log_format: required(&self.log_format).context("log_format")?.clone(), - opentelemetry: self - .opentelemetry - .as_ref() - .map(|cfg| cfg.read().context("opentelemetry")) - .transpose()?, + opentelemetry: self.opentelemetry.as_ref().and_then(|cfg| cfg.read().ok()), log_directives: self.log_directives.clone(), }) } diff --git a/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto b/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto index 396bd400c04b..6ec81baf51ad 100644 --- a/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto +++ b/core/lib/protobuf_config/src/proto/config/base_token_adjuster.proto @@ -15,4 +15,5 @@ message BaseTokenAdjuster { optional bool halt_on_error = 10; optional uint32 price_fetching_max_attempts = 11; optional uint64 price_fetching_sleep_ms = 12; + optional uint32 l1_update_deviation_percentage = 13; } diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto new file mode 100644 index 000000000000..ef58fbcecb4f --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package zksync.config.da_client; + +import "zksync/config/object_store.proto"; + +message AvailConfig { + optional string api_node_url = 1; + optional string bridge_api_url = 2; + optional string seed = 3; + optional uint32 app_id = 4; + optional uint64 timeout = 5; + optional uint64 max_retries = 6; +} + +message DataAvailabilityClient { + // oneof in protobuf allows for None + oneof config { + AvailConfig avail = 1; + object_store.ObjectStore object_store = 2; + } +} diff --git a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto index d1d913498a4e..dd366bd5b925 100644 --- a/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto +++ b/core/lib/protobuf_config/src/proto/config/da_dispatcher.proto @@ -2,10 +2,9 @@ syntax = "proto3"; package zksync.config.da_dispatcher; -import "zksync/config/object_store.proto"; - message DataAvailabilityDispatcher { optional uint32 polling_interval_ms = 1; optional uint32 max_rows_to_dispatch = 2; optional uint32 max_retries = 3; + optional bool use_dummy_inclusion_data = 4; } diff --git a/core/lib/protobuf_config/src/proto/config/experimental.proto b/core/lib/protobuf_config/src/proto/config/experimental.proto index 55fb81b56325..5e1d045ca670 100644 --- a/core/lib/protobuf_config/src/proto/config/experimental.proto +++ 
b/core/lib/protobuf_config/src/proto/config/experimental.proto @@ -28,7 +28,7 @@ enum FastVmMode { // Experimental VM configuration message VmPlayground { optional FastVmMode fast_vm_mode = 1; // optional; if not set, fast VM is not used - optional string db_path = 2; // optional; defaults to `./db/vm_playground` + optional string db_path = 2; // optional; if not set, playground will not use RocksDB cache optional uint32 first_processed_batch = 3; // optional; defaults to 0 optional bool reset = 4; // optional; defaults to false optional uint32 window_size = 5; // optional; non-zero; defaults to 1 diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index 3595468949b1..ee70b61b18b3 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -25,6 +25,7 @@ import "zksync/config/external_price_api_client.proto"; import "zksync/config/external_proof_integration_api.proto"; import "zksync/core/consensus.proto"; import "zksync/config/prover_job_monitor.proto"; +import "zksync/config/da_client.proto"; message GeneralConfig { optional database.Postgres postgres = 1; @@ -60,4 +61,5 @@ message GeneralConfig { optional external_proof_integration_api.ExternalProofIntegrationApi external_proof_integration_api = 43; optional experimental.Vm experimental_vm = 44; optional prover_job_monitor.ProverJobMonitor prover_job_monitor = 45; + optional da_client.DataAvailabilityClient da_client = 46; } diff --git a/core/lib/protobuf_config/src/proto/config/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto index 6e679d865d92..08cbb954fcbc 100644 --- a/core/lib/protobuf_config/src/proto/config/genesis.proto +++ b/core/lib/protobuf_config/src/proto/config/genesis.proto @@ -8,8 +8,9 @@ enum L1BatchCommitDataGeneratorMode { } message Prover { - optional string recursion_scheduler_level_vk_hash = 1; // required; H256 + optional string recursion_scheduler_level_vk_hash = 1; // optional and deprecated, used as alias for `snark_wrapper_vk_hash`; H256 optional bool dummy_verifier = 5; + optional string snark_wrapper_vk_hash = 6; // optional (required if `recursion_scheduler_level_vk_hash` is not set); H256 reserved 2, 3, 4; reserved "recursion_node_level_vk_hash", "recursion_leaf_level_vk_hash", "recursion_circuits_set_vks_hash"; } diff --git a/core/lib/protobuf_config/src/proto/config/house_keeper.proto b/core/lib/protobuf_config/src/proto/config/house_keeper.proto index dce4af95b809..c3a4ca8ad672 100644 --- a/core/lib/protobuf_config/src/proto/config/house_keeper.proto +++ b/core/lib/protobuf_config/src/proto/config/house_keeper.proto @@ -4,17 +4,17 @@ package zksync.config.house_keeper; message HouseKeeper { optional uint64 l1_batch_metrics_reporting_interval_ms = 1; // required; ms - optional uint64 gpu_prover_queue_reporting_interval_ms = 2; // required; ms - optional uint64 prover_job_retrying_interval_ms = 3; // required; ms - optional uint64 prover_stats_reporting_interval_ms = 4; // required ms - optional uint64 witness_job_moving_interval_ms = 5; // required; ms - optional uint64 witness_generator_stats_reporting_interval_ms = 6; // required; ms - optional uint64 witness_generator_job_retrying_interval_ms = 9; // required; ms - optional uint32 prover_db_pool_size = 10; // required - optional uint64 proof_compressor_job_retrying_interval_ms = 12; // required; ms - optional uint64 proof_compressor_stats_reporting_interval_ms = 13; // required; ms - 
optional uint64 prover_job_archiver_archiving_interval_ms = 14; // optional; ms - optional uint64 prover_job_archiver_archive_after_secs = 15; // optional; seconds - optional uint64 fri_gpu_prover_archiver_archiving_interval_ms = 16; // optional; ms - optional uint64 fri_gpu_prover_archiver_archive_after_secs = 17; // optional; seconds + reserved 2; reserved "gpu_prover_queue_reporting_interval_ms"; + reserved 3; reserved "prover_job_retrying_interval_ms"; + reserved 4; reserved "prover_stats_reporting_interval_ms"; + reserved 5; reserved "witness_job_moving_interval_ms"; + reserved 6; reserved "witness_generator_stats_reporting_interval_ms"; + reserved 9; reserved "witness_generator_job_retrying_interval_ms"; + reserved 10; reserved "prover_db_pool_size"; + reserved 12; reserved "proof_compressor_job_retrying_interval_ms"; + reserved 13; reserved "proof_compressor_stats_reporting_interval_ms"; + reserved 14; reserved "prover_job_archiver_archiving_interval_ms"; + reserved 15; reserved "prover_job_archiver_archive_after_secs"; + reserved 16; reserved "fri_gpu_prover_archiver_archiving_interval_ms"; + reserved 17; reserved "fri_gpu_prover_archiver_archive_after_secs"; } diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index c64c993be7c8..835ead1ab65c 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -56,6 +56,8 @@ message GenesisSpec { repeated WeightedValidator validators = 3; // must be non-empty; validator committee. optional string leader = 4; // required; ValidatorPublicKey repeated WeightedAttester attesters = 5; // can be empty; attester committee. + // Currently not in consensus genesis, but still a part of the global configuration. + optional string registry_address = 6; // optional; H160 } // Per peer connection RPC rate limits. 
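For readers tracking the API change threaded through the VM versions above: below is a minimal caller-side sketch of the reworked `inspect_transaction_with_bytecode_compression` signature. The `vm`, `tracer`, and `tx` bindings are assumed rather than part of this diff; on success the method now returns the compressed bytecodes of the executed transaction (`BytecodeCompressionResult<'_>`, a `Cow` over `CompressedBytecodeInfo`) instead of a bare `Ok(())`.

// Hedged sketch only; assumes `vm: impl VmInterface`, a `tracer`, and a signed `tx`.
let (compression_result, exec_result) =
    vm.inspect_transaction_with_bytecode_compression(tracer, tx.clone(), true);
match compression_result {
    // The `Ok` payload mirrors the `get_last_tx_compressed_bytecodes().into()` conversion above.
    Ok(bytecodes) => tracing::debug!("published {} compressed bytecodes", bytecodes.len()),
    // On compression failure, callers roll back and retry with `with_compression = false`,
    // as `execute_tx` in `core/lib/tee_verifier` does further down in this diff.
    Err(err) => tracing::warn!("bytecode compression failed: {err}"),
}
let _ = exec_result; // execution logs are returned regardless of the compression outcome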
diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs index 695f404f64d1..d653b9b92bfd 100644 --- a/core/lib/protobuf_config/src/tests.rs +++ b/core/lib/protobuf_config/src/tests.rs @@ -42,6 +42,7 @@ fn test_encoding() { test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index e4fe566618b8..bc95345bbbaa 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -65,9 +65,6 @@ pub enum SubmitProofRequest { SkippedProofGeneration, } -#[derive(Debug, Serialize, Deserialize)] -pub struct OptionalProofGenerationDataRequest(pub Option); - #[derive(Debug, Serialize, Deserialize)] pub struct VerifyProofRequest(pub Box); diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs index a2d55a140655..a2aee0c2733e 100644 --- a/core/lib/prover_interface/tests/job_serialization.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -170,7 +170,7 @@ fn test_tee_proof_request_serialization() { "signature": [ 0, 1, 2, 3, 4 ], "pubkey": [ 5, 6, 7, 8, 9 ], "proof": [ 10, 11, 12, 13, 14 ], - "tee_type": "Sgx" + "tee_type": "sgx" }"#; let tee_proof_result = serde_json::from_str::(tee_proof_str).unwrap(); let tee_proof_expected = SubmitTeeProofRequest(Box::new(L1BatchTeeProofForL1 { diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs index ad5361c4608b..205579552a30 100644 --- a/core/lib/state/src/lib.rs +++ b/core/lib/state/src/lib.rs @@ -20,7 +20,7 @@ pub use self::{ }, shadow_storage::ShadowStorage, storage_factory::{ - BatchDiff, OwnedStorage, PgOrRocksdbStorage, ReadStorageFactory, RocksdbWithMemory, + BatchDiff, CommonStorage, OwnedStorage, ReadStorageFactory, RocksdbWithMemory, }, }; diff --git a/core/lib/state/src/rocksdb/mod.rs b/core/lib/state/src/rocksdb/mod.rs index f866a22a3e52..30c58ca6a0ef 100644 --- a/core/lib/state/src/rocksdb/mod.rs +++ b/core/lib/state/src/rocksdb/mod.rs @@ -347,7 +347,7 @@ impl RocksdbStorage { let to_l1_batch_number = if let Some(to_l1_batch_number) = to_l1_batch_number { if to_l1_batch_number > latest_l1_batch_number { let err = anyhow::anyhow!( - "Requested to update RocksDB to L1 batch number ({current_l1_batch_number}) that \ + "Requested to update RocksDB to L1 batch number ({to_l1_batch_number}) that \ is greater than the last sealed L1 batch number in Postgres ({latest_l1_batch_number})" ); return Err(err.into()); diff --git a/core/lib/state/src/shadow_storage.rs b/core/lib/state/src/shadow_storage.rs index 28d7b997cd1f..d69491e500f2 100644 --- a/core/lib/state/src/shadow_storage.rs +++ b/core/lib/state/src/shadow_storage.rs @@ -1,10 +1,12 @@ +use std::fmt; + use vise::{Counter, Metrics}; use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; use zksync_vm_interface::storage::ReadStorage; -#[allow(clippy::struct_field_names)] #[derive(Debug, Metrics)] #[metrics(prefix = "shadow_storage")] +#[allow(clippy::struct_field_names)] // false positive struct ShadowStorageMetrics { /// Number of mismatches when reading a value from a shadow storage. 
read_value_mismatch: Counter, @@ -19,24 +21,28 @@ struct ShadowStorageMetrics { #[vise::register] static METRICS: vise::Global = vise::Global::new(); -/// [`ReadStorage`] implementation backed by 2 different backends: -/// source_storage -- backend that will return values for function calls and be the source of truth -/// to_check_storage -- secondary storage, which will verify it's own return values against source_storage -/// Note that if to_check_storage value is different than source value, execution continues and metrics/ logs are emitted. +/// [`ReadStorage`] implementation backed by 2 different backends which are compared for each performed operation. +/// +/// - `Ref` is the backend that will return values for function calls and be the source of truth +/// - `Check` is the secondary storage, which will have its return values verified against `Ref` +/// +/// If `Check` value is different from a value from `Ref`, storage behavior depends on the [panic on divergence](Self::set_panic_on_divergence()) flag. +/// If this flag is set (which it is by default), the storage panics; otherwise, execution continues and metrics / logs are emitted. #[derive(Debug)] -pub struct ShadowStorage<'a> { - source_storage: Box, - to_check_storage: Box, - metrics: &'a ShadowStorageMetrics, +pub struct ShadowStorage { + source_storage: Ref, + to_check_storage: Check, + metrics: &'static ShadowStorageMetrics, l1_batch_number: L1BatchNumber, + panic_on_divergence: bool, } -impl<'a> ShadowStorage<'a> { +impl ShadowStorage { /// Creates a new storage using the 2 underlying [`ReadStorage`]s, first as source, the second to be checked /// against the source. pub fn new( - source_storage: Box, - to_check_storage: Box, + source_storage: Ref, + to_check_storage: Check, l1_batch_number: L1BatchNumber, ) -> Self { Self { @@ -44,35 +50,49 @@ impl<'a> ShadowStorage<'a> { to_check_storage, metrics: &METRICS, l1_batch_number, + panic_on_divergence: true, + } + } + + /// Sets behavior if a storage divergence is detected. 
+ pub fn set_panic_on_divergence(&mut self, panic_on_divergence: bool) { + self.panic_on_divergence = panic_on_divergence; + } + + fn error_or_panic(&self, args: fmt::Arguments<'_>) { + if self.panic_on_divergence { + panic!("{args}"); + } else { + tracing::error!(l1_batch_number = self.l1_batch_number.0, "{args}"); } } } -impl ReadStorage for ShadowStorage<'_> { +impl ReadStorage for ShadowStorage { fn read_value(&mut self, key: &StorageKey) -> StorageValue { - let source_value = self.source_storage.as_mut().read_value(key); - let expected_value = self.to_check_storage.as_mut().read_value(key); + let source_value = self.source_storage.read_value(key); + let expected_value = self.to_check_storage.read_value(key); if source_value != expected_value { self.metrics.read_value_mismatch.inc(); - tracing::error!( + self.error_or_panic(format_args!( "read_value({key:?}) -- l1_batch_number={:?} -- expected source={source_value:?} \ to be equal to to_check={expected_value:?}", self.l1_batch_number - ); + )); } source_value } fn is_write_initial(&mut self, key: &StorageKey) -> bool { - let source_value = self.source_storage.as_mut().is_write_initial(key); - let expected_value = self.to_check_storage.as_mut().is_write_initial(key); + let source_value = self.source_storage.is_write_initial(key); + let expected_value = self.to_check_storage.is_write_initial(key); if source_value != expected_value { self.metrics.is_write_initial_mismatch.inc(); - tracing::error!( + self.error_or_panic(format_args!( "is_write_initial({key:?}) -- l1_batch_number={:?} -- expected source={source_value:?} \ to be equal to to_check={expected_value:?}", self.l1_batch_number - ); + )); } source_value } @@ -82,25 +102,25 @@ impl ReadStorage for ShadowStorage<'_> { let expected_value = self.to_check_storage.load_factory_dep(hash); if source_value != expected_value { self.metrics.load_factory_dep_mismatch.inc(); - tracing::error!( + self.error_or_panic(format_args!( "load_factory_dep({hash:?}) -- l1_batch_number={:?} -- expected source={source_value:?} \ to be equal to to_check={expected_value:?}", - self.l1_batch_number - ); + self.l1_batch_number + )); } source_value } fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { - let source_value = self.source_storage.as_mut().get_enumeration_index(key); - let expected_value = self.to_check_storage.as_mut().get_enumeration_index(key); + let source_value = self.source_storage.get_enumeration_index(key); + let expected_value = self.to_check_storage.get_enumeration_index(key); if source_value != expected_value { - tracing::error!( + self.metrics.get_enumeration_index_mismatch.inc(); + self.error_or_panic(format_args!( "get_enumeration_index({key:?}) -- l1_batch_number={:?} -- \ expected source={source_value:?} to be equal to to_check={expected_value:?}", self.l1_batch_number - ); - self.metrics.get_enumeration_index_mismatch.inc(); + )); } source_value } diff --git a/core/lib/state/src/storage_factory.rs b/core/lib/state/src/storage_factory.rs index e2b5275c48d5..2ef9b249af2e 100644 --- a/core/lib/state/src/storage_factory.rs +++ b/core/lib/state/src/storage_factory.rs @@ -1,4 +1,7 @@ -use std::{collections::HashMap, fmt::Debug}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, +}; use anyhow::Context as _; use async_trait::async_trait; @@ -6,12 +9,13 @@ use tokio::{runtime::Handle, sync::watch}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_storage::RocksDB; use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; -use 
zksync_vm_interface::storage::ReadStorage; +use zksync_utils::u256_to_h256; +use zksync_vm_interface::storage::{ReadStorage, StorageSnapshot, StorageWithSnapshot}; use crate::{PostgresStorage, RocksdbStorage, RocksdbStorageBuilder, StateKeeperColumnFamily}; /// Storage with a static lifetime that can be sent to Tokio tasks etc. -pub type OwnedStorage = PgOrRocksdbStorage<'static>; +pub type OwnedStorage = CommonStorage<'static>; /// Factory that can produce storage instances on demand. The storage type is encapsulated as a type param /// (mostly for testing purposes); the default is [`OwnedStorage`]. @@ -40,7 +44,7 @@ impl ReadStorageFactory for ConnectionPool { ) -> anyhow::Result> { let connection = self.connection().await?; let storage = OwnedStorage::postgres(connection, l1_batch_number).await?; - Ok(Some(storage)) + Ok(Some(storage.into())) } } @@ -65,19 +69,34 @@ pub struct RocksdbWithMemory { pub batch_diffs: Vec, } -/// A [`ReadStorage`] implementation that uses either [`PostgresStorage`] or [`RocksdbStorage`] -/// underneath. +/// Union of all [`ReadStorage`] implementations that are returned by [`ReadStorageFactory`], such as +/// Postgres- and RocksDB-backed storages. +/// +/// Ordinarily, you might want to use the [`OwnedStorage`] type alias instead of using `CommonStorage` directly. +/// The former naming signals that the storage has static lifetime and thus can be sent to Tokio tasks or other threads. #[derive(Debug)] -pub enum PgOrRocksdbStorage<'a> { +pub enum CommonStorage<'a> { /// Implementation over a Postgres connection. Postgres(PostgresStorage<'a>), /// Implementation over a RocksDB cache instance. Rocksdb(RocksdbStorage), /// Implementation over a RocksDB cache instance with in-memory DB diffs. RocksdbWithMemory(RocksdbWithMemory), + /// In-memory storage snapshot with the Postgres storage fallback. + Snapshot(StorageWithSnapshot>), + /// Generic implementation. Should be used for testing purposes only since it has performance penalty because + /// of the dynamic dispatch. + Boxed(Box), } -impl PgOrRocksdbStorage<'static> { +impl<'a> CommonStorage<'a> { + /// Creates a boxed storage. Should be used for testing purposes only. + pub fn boxed(storage: impl ReadStorage + Send + 'a) -> Self { + Self::Boxed(Box::new(storage)) + } +} + +impl CommonStorage<'static> { /// Creates a Postgres-based storage. Because of the `'static` lifetime requirement, `connection` must be /// non-transactional. /// @@ -87,7 +106,7 @@ impl PgOrRocksdbStorage<'static> { pub async fn postgres( mut connection: Connection<'static, Core>, l1_batch_number: L1BatchNumber, - ) -> anyhow::Result { + ) -> anyhow::Result> { let l2_block_number = if let Some((_, l2_block_number)) = connection .blocks_dal() .get_l2_block_range_of_l1_batch(l1_batch_number) .await? @@ -110,11 +129,7 @@ impl PgOrRocksdbStorage<'static> { snapshot_recovery.l2_block_number }; tracing::debug!(%l1_batch_number, %l2_block_number, "Using Postgres-based storage"); - Ok( - PostgresStorage::new_async(Handle::current(), connection, l2_block_number, true) - .await? - .into(), - ) + PostgresStorage::new_async(Handle::current(), connection, l2_block_number, true).await } /// Catches up RocksDB synchronously (i.e. assumes the gap is small) and @@ -153,6 +168,92 @@ impl PgOrRocksdbStorage<'static> { tracing::debug!(%rocksdb_l1_batch_number, "Using RocksDB-based storage"); Ok(Some(rocksdb.into())) } + + /// Creates a storage snapshot. Requires protective reads to be persisted for the batch; otherwise, + /// will return `Ok(None)`. 
+ #[tracing::instrument(skip(connection))] + pub async fn snapshot( + connection: &mut Connection<'static, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result> { + let Some(header) = connection + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await? + else { + return Ok(None); + }; + let bytecode_hashes: HashSet<_> = header + .used_contract_hashes + .into_iter() + .map(u256_to_h256) + .collect(); + + // Check protective reads early on. + let protective_reads = connection + .storage_logs_dedup_dal() + .get_protective_reads_for_l1_batch(l1_batch_number) + .await?; + if protective_reads.is_empty() { + tracing::debug!("No protective reads for batch"); + return Ok(None); + } + let protective_reads_len = protective_reads.len(); + tracing::debug!("Loaded {protective_reads_len} protective reads"); + + let touched_slots = connection + .storage_logs_dal() + .get_touched_slots_for_l1_batch(l1_batch_number) + .await?; + tracing::debug!("Loaded {} touched keys", touched_slots.len()); + + let all_accessed_keys: Vec<_> = protective_reads + .into_iter() + .map(|key| key.hashed_key()) + .chain(touched_slots.into_keys()) + .collect(); + let previous_values = connection + .storage_logs_dal() + .get_previous_storage_values(&all_accessed_keys, l1_batch_number) + .await?; + tracing::debug!( + "Obtained {} previous values for accessed keys", + previous_values.len() + ); + let initial_write_info = connection + .storage_logs_dal() + .get_l1_batches_and_indices_for_initial_writes(&all_accessed_keys) + .await?; + tracing::debug!("Obtained initial write info for accessed keys"); + + let bytecodes = connection + .factory_deps_dal() + .get_factory_deps(&bytecode_hashes) + .await; + tracing::debug!("Loaded {} bytecodes used in the batch", bytecodes.len()); + let factory_deps = bytecodes + .into_iter() + .map(|(hash_u256, words)| { + let bytes: Vec = words.into_iter().flatten().collect(); + (u256_to_h256(hash_u256), bytes) + }) + .collect(); + + let storage = previous_values.into_iter().map(|(key, prev_value)| { + let prev_value = prev_value.unwrap_or_default(); + let enum_index = + initial_write_info + .get(&key) + .copied() + .and_then(|(l1_batch, enum_index)| { + // Filter out enum indexes assigned "in the future" + (l1_batch < l1_batch_number).then_some(enum_index) + }); + (key, enum_index.map(|idx| (prev_value, idx))) + }); + let storage = storage.collect(); + Ok(Some(StorageSnapshot::new(storage, factory_deps))) + } } impl ReadStorage for RocksdbWithMemory { @@ -203,12 +304,14 @@ impl ReadStorage for RocksdbWithMemory { } } -impl ReadStorage for PgOrRocksdbStorage<'_> { +impl ReadStorage for CommonStorage<'_> { fn read_value(&mut self, key: &StorageKey) -> StorageValue { match self { Self::Postgres(postgres) => postgres.read_value(key), Self::Rocksdb(rocksdb) => rocksdb.read_value(key), Self::RocksdbWithMemory(rocksdb_mem) => rocksdb_mem.read_value(key), + Self::Snapshot(snapshot) => snapshot.read_value(key), + Self::Boxed(storage) => storage.read_value(key), } } @@ -217,6 +320,8 @@ impl ReadStorage for PgOrRocksdbStorage<'_> { Self::Postgres(postgres) => postgres.is_write_initial(key), Self::Rocksdb(rocksdb) => rocksdb.is_write_initial(key), Self::RocksdbWithMemory(rocksdb_mem) => rocksdb_mem.is_write_initial(key), + Self::Snapshot(snapshot) => snapshot.is_write_initial(key), + Self::Boxed(storage) => storage.is_write_initial(key), } } @@ -225,6 +330,8 @@ impl ReadStorage for PgOrRocksdbStorage<'_> { Self::Postgres(postgres) => postgres.load_factory_dep(hash), Self::Rocksdb(rocksdb) => 
rocksdb.load_factory_dep(hash), Self::RocksdbWithMemory(rocksdb_mem) => rocksdb_mem.load_factory_dep(hash), + Self::Snapshot(snapshot) => snapshot.load_factory_dep(hash), + Self::Boxed(storage) => storage.load_factory_dep(hash), } } @@ -233,18 +340,26 @@ impl ReadStorage for PgOrRocksdbStorage<'_> { Self::Postgres(postgres) => postgres.get_enumeration_index(key), Self::Rocksdb(rocksdb) => rocksdb.get_enumeration_index(key), Self::RocksdbWithMemory(rocksdb_mem) => rocksdb_mem.get_enumeration_index(key), + Self::Snapshot(snapshot) => snapshot.get_enumeration_index(key), + Self::Boxed(storage) => storage.get_enumeration_index(key), } } } -impl<'a> From> for PgOrRocksdbStorage<'a> { +impl<'a> From> for CommonStorage<'a> { fn from(value: PostgresStorage<'a>) -> Self { Self::Postgres(value) } } -impl<'a> From for PgOrRocksdbStorage<'a> { +impl From for CommonStorage<'_> { fn from(value: RocksdbStorage) -> Self { Self::Rocksdb(value) } } + +impl<'a> From>> for CommonStorage<'a> { + fn from(value: StorageWithSnapshot>) -> Self { + Self::Snapshot(value) + } +} diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml index a56f383bdbad..6828eeef8b10 100644 --- a/core/lib/tee_verifier/Cargo.toml +++ b/core/lib/tee_verifier/Cargo.toml @@ -14,12 +14,9 @@ categories.workspace = true zksync_multivm.workspace = true zksync_config.workspace = true zksync_crypto_primitives.workspace = true -zksync_dal.workspace = true -zksync_db_connection.workspace = true zksync_merkle_tree.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true -zksync_queued_job_processor.workspace = true zksync_types.workspace = true zksync_utils.workspace = true diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 4234754a75f2..8728a4e52749 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -244,7 +244,7 @@ fn execute_tx( // Attempt to run VM with bytecode compression on. vm.make_snapshot(); if vm - .execute_transaction_with_bytecode_compression(tx.clone(), true) + .inspect_transaction_with_bytecode_compression(Default::default(), tx.clone(), true) .0 .is_ok() { @@ -255,7 +255,7 @@ fn execute_tx( // If failed with bytecode compression, attempt to run without bytecode compression. vm.rollback_to_the_latest_snapshot(); if vm - .execute_transaction_with_bytecode_compression(tx.clone(), false) + .inspect_transaction_with_bytecode_compression(Default::default(), tx.clone(), false) .0 .is_err() { diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index c80f304a75a6..55cbef761ad5 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -43,6 +43,7 @@ blake2.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["rt", "macros"] } +assert_matches.workspace = true bincode.workspace = true [build-dependencies] diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index bf26caddd07b..9391c8627573 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -44,10 +44,23 @@ pub struct SyncBlock { pub protocol_version: ProtocolVersionId, } +/// Global configuration of the consensus served by the main node to the external nodes. +/// In particular, it contains consensus genesis. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::GlobalConfig`. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConsensusGlobalConfig(pub serde_json::Value); + +/// [DEPRECATED] Genesis served by the main node to the external nodes. +/// This type is deprecated since ConsensusGlobalConfig also contains genesis and is extensible. +/// +/// The wrapped JSON value corresponds to `zksync_consensus_roles::validator::Genesis`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ConsensusGenesis(pub serde_json::Value); /// AttestationStatus maintained by the main node. /// Used for testing L1 batch signing by consensus attesters. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::AttestationStatus`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AttestationStatus(pub serde_json::Value); diff --git a/core/lib/types/src/contract_verification_api.rs b/core/lib/types/src/contract_verification_api.rs index 588de3cb675e..8ee1d3ec6491 100644 --- a/core/lib/types/src/contract_verification_api.rs +++ b/core/lib/types/src/contract_verification_api.rs @@ -243,32 +243,31 @@ pub enum DeployContractCalldata { #[cfg(test)] mod tests { + use assert_matches::assert_matches; + use super::SourceCodeData; #[test] fn source_code_deserialization() { let single_file_str = r#"{"codeFormat": "solidity-single-file", "sourceCode": "text"}"#; let single_file_result = serde_json::from_str::(single_file_str); - assert!(matches!( - single_file_result, - Ok(SourceCodeData::SolSingleFile(_)) - )); + assert_matches!(single_file_result, Ok(SourceCodeData::SolSingleFile(_))); let stand_json_input_str = r#"{"codeFormat": "solidity-standard-json-input", "sourceCode": {}}"#; let stand_json_input_result = serde_json::from_str::(stand_json_input_str); - assert!(matches!( + assert_matches!( stand_json_input_result, Ok(SourceCodeData::StandardJsonInput(_)) - )); + ); let type_not_specified_str = r#"{"sourceCode": "text"}"#; let type_not_specified_result = serde_json::from_str::(type_not_specified_str); - assert!(matches!( + assert_matches!( type_not_specified_result, Ok(SourceCodeData::SolSingleFile(_)) - )); + ); let type_not_specified_object_str = r#"{"sourceCode": {}}"#; let type_not_specified_object_result = diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index bc9bd7667e82..1afb108a0536 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -282,14 +282,14 @@ impl ProtocolVersion { pub fn apply_upgrade( &self, upgrade: ProtocolUpgrade, - new_scheduler_vk_hash: Option, + new_snark_wrapper_vk_hash: Option, ) -> ProtocolVersion { ProtocolVersion { version: upgrade.version, timestamp: upgrade.timestamp, l1_verifier_config: L1VerifierConfig { - recursion_scheduler_level_vk_hash: new_scheduler_vk_hash - .unwrap_or(self.l1_verifier_config.recursion_scheduler_level_vk_hash), + snark_wrapper_vk_hash: new_snark_wrapper_vk_hash + .unwrap_or(self.l1_verifier_config.snark_wrapper_vk_hash), }, base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: upgrade diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 887dfcbff378..c71e6e4206c5 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -980,6 +980,7 @@ pub fn validate_factory_deps( #[cfg(test)] mod tests { + use assert_matches::assert_matches; use zksync_crypto_primitives::K256PrivateKey; use super::*; @@ -1427,10 +1428,10 @@ mod tests { tx.s = Some(U256::from_big_endian(signature.s())); let request = 
            TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap();
-        assert!(matches!(
+        assert_matches!(
             L2Tx::from_request(request.0, random_tx_max_size),
             Err(SerializationTransactionError::OversizedData(_, _))
-        ))
+        )
     }

     #[test]
@@ -1456,10 +1457,10 @@ mod tests {
         let try_to_l2_tx: Result<L2Tx, SerializationTransactionError> =
             L2Tx::from_request(call_request.into(), random_tx_max_size);
-        assert!(matches!(
+        assert_matches!(
             try_to_l2_tx,
             Err(SerializationTransactionError::OversizedData(_, _))
-        ));
+        );
     }

     #[test]
diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml
index 5ec27380df5b..b87b2ad98964 100644
--- a/core/lib/utils/Cargo.toml
+++ b/core/lib/utils/Cargo.toml
@@ -25,7 +25,6 @@ thiserror.workspace = true
 futures.workspace = true
 hex.workspace = true
 reqwest = { workspace = true, features = ["blocking"] }
-itertools.workspace = true
 serde_json.workspace = true
 once_cell.workspace = true
@@ -33,3 +32,4 @@ once_cell.workspace = true
 rand.workspace = true
 tokio = { workspace = true, features = ["macros", "rt"] }
 bincode.workspace = true
+assert_matches.workspace = true
diff --git a/core/lib/utils/src/env.rs b/core/lib/utils/src/env.rs
index 0eddc6c2cd64..5ae07caf1486 100644
--- a/core/lib/utils/src/env.rs
+++ b/core/lib/utils/src/env.rs
@@ -8,6 +8,87 @@ use once_cell::sync::OnceCell;

 static WORKSPACE: OnceCell<Option<PathBuf>> = OnceCell::new();

+/// Represents Cargo workspaces available in the repository.
+#[derive(Debug, Clone, Copy)]
+pub enum Workspace<'a> {
+    /// Workspace was not found.
+    /// Assumes that the code is running in a binary.
+    /// Will use the current directory as a fallback.
+    None,
+    /// Root folder.
+    Core(&'a Path),
+    /// `prover` folder.
+    Prover(&'a Path),
+    /// `toolbox` folder.
+    Toolbox(&'a Path),
+}
+
+impl Workspace<'static> {
+    /// Finds the location of the current workspace. If the code runs inside a Cargo workspace,
+    /// the correct folder is returned; if it runs as a standalone binary (e.g., in a Docker
+    /// container), the caller has to fall back to another directory.
+    /// The code is inspired by `insta`:
+    /// `https://github.com/mitsuhiko/insta/blob/master/insta/src/env.rs`
+    pub fn locate() -> Self {
+        // Since `locate_workspace_inner()` should be deterministic, it makes little sense to call
+        // `OnceCell::get_or_try_init()` here; the repeated calls are just as unlikely to succeed as the initial call.
+        // Instead, we store `None` in the `OnceCell` if initialization failed.
+        let path: Option<&'static Path> = WORKSPACE
+            .get_or_init(|| {
+                let result = locate_workspace_inner();
+                // If the workspace is not found, we store `None` in the `OnceCell`.
+                // It doesn't make sense to log it, since in most production cases the workspace
+                // is not present.
+                result.ok()
+            })
+            .as_deref();
+        path.map_or(Self::None, Self::from)
+    }
+}
+
+impl<'a> Workspace<'a> {
+    const PROVER_DIRECTORY_NAME: &'static str = "prover";
+    const TOOLBOX_DIRECTORY_NAME: &'static str = "zk_toolbox";
+
+    /// Returns the path of the core workspace.
+    /// For `Workspace::None`, considers the current directory to represent the core workspace.
+    pub fn core(self) -> PathBuf {
+        match self {
+            Self::None => PathBuf::from("."),
+            Self::Core(path) => path.into(),
+            Self::Prover(path) | Self::Toolbox(path) => path.parent().unwrap().into(),
+        }
+    }
+
+    /// Returns the path of the `prover` workspace.
+    pub fn prover(self) -> PathBuf {
+        match self {
+            Self::Prover(path) => path.into(),
+            _ => self.core().join(Self::PROVER_DIRECTORY_NAME),
+        }
+    }
+
+    /// Returns the path of the `zk_toolbox` workspace.
+ pub fn toolbox(self) -> PathBuf { + match self { + Self::Toolbox(path) => path.into(), + _ => self.core().join(Self::TOOLBOX_DIRECTORY_NAME), + } + } +} + +impl<'a> From<&'a Path> for Workspace<'a> { + fn from(path: &'a Path) -> Self { + if path.ends_with(Self::PROVER_DIRECTORY_NAME) { + Self::Prover(path) + } else if path.ends_with(Self::TOOLBOX_DIRECTORY_NAME) { + Self::Toolbox(path) + } else { + Self::Core(path) + } + } +} + fn locate_workspace_inner() -> anyhow::Result { let output = std::process::Command::new( std::env::var("CARGO") @@ -40,31 +121,86 @@ fn locate_workspace_inner() -> anyhow::Result { .to_path_buf()) } -/// Find the location of the current workspace, if this code works in workspace -/// then it will return the correct folder if, it's binary e.g. in docker container -/// you have to use fallback to another directory -/// The code has been inspired by `insta` -/// `https://github.com/mitsuhiko/insta/blob/master/insta/src/env.rs` -pub fn locate_workspace() -> Option<&'static Path> { - // Since `locate_workspace_inner()` should be deterministic, it makes little sense to call - // `OnceCell::get_or_try_init()` here; the repeated calls are just as unlikely to succeed as the initial call. - // Instead, we store `None` in the `OnceCell` if initialization failed. - WORKSPACE - .get_or_init(|| { - let result = locate_workspace_inner(); - if result.is_err() { - // `get_or_init()` is guaranteed to call the provided closure once per `OnceCell`; - // i.e., we won't spam logs here. - tracing::info!( - "locate_workspace() failed. You are using an already compiled version" - ); - } - result.ok() - }) - .as_deref() -} +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + + use super::*; + + /// Will reset the pwd on drop. + /// This is needed to make sure that even if the test fails, the env + /// for other tests is left intact. + struct PwdProtector(PathBuf); + + impl PwdProtector { + fn new() -> Self { + let pwd = std::env::current_dir().unwrap(); + Self(pwd) + } + } + + impl Drop for PwdProtector { + fn drop(&mut self) { + std::env::set_current_dir(self.0.clone()).unwrap(); + } + } + + #[test] + fn test_workspace_locate() { + let _pwd_protector = PwdProtector::new(); + + // Core. + + let workspace = Workspace::locate(); + assert_matches!(workspace, Workspace::Core(_)); + let core_path = workspace.core(); + // Check if prover and toolbox directories exist. + assert!(workspace.prover().exists()); + assert_matches!( + Workspace::from(workspace.prover().as_path()), + Workspace::Prover(_) + ); + assert!(workspace.toolbox().exists()); + assert_matches!( + Workspace::from(workspace.toolbox().as_path()), + Workspace::Toolbox(_) + ); + + // Prover. + + // We use `cargo-nextest` for running tests, which runs each test in parallel, + // so we can safely alter the global env, assuming that we will restore it after + // the test. + std::env::set_current_dir(workspace.prover()).unwrap(); + let workspace_path = locate_workspace_inner().unwrap(); + let workspace = Workspace::from(workspace_path.as_path()); + assert_matches!(workspace, Workspace::Prover(_)); + let prover_path = workspace.prover(); + assert_eq!(workspace.core(), core_path); + assert_matches!( + Workspace::from(workspace.core().as_path()), + Workspace::Core(_) + ); + assert!(workspace.toolbox().exists()); + assert_matches!( + Workspace::from(workspace.toolbox().as_path()), + Workspace::Toolbox(_) + ); -/// Returns [`locate_workspace()`] output with the "." fallback. 
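To make the `Workspace` migration concrete, here is a minimal usage sketch. It is not part of the diff and assumes only the API added above plus the `pub mod env;` export that appears later in this diff:

```rust
use zksync_utils::env::Workspace;

fn main() {
    // Equivalent of the removed `workspace_dir_or_current_dir()` helper:
    // `locate()` yields `Workspace::None` outside a Cargo workspace, and
    // `core()` maps that variant to the current directory (".").
    let workspace = Workspace::locate();
    println!("core:    {}", workspace.core().display());
    // Derived locations replace ad-hoc path joins at call sites.
    println!("prover:  {}", workspace.prover().display());
    println!("toolbox: {}", workspace.toolbox().display());
}
```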
-pub fn workspace_dir_or_current_dir() -> &'static Path { - locate_workspace().unwrap_or_else(|| Path::new(".")) + // Toolbox. + std::env::set_current_dir(workspace.toolbox()).unwrap(); + let workspace_path = locate_workspace_inner().unwrap(); + let workspace = Workspace::from(workspace_path.as_path()); + assert_matches!(workspace, Workspace::Toolbox(_)); + assert_eq!(workspace.core(), core_path); + assert_matches!( + Workspace::from(workspace.core().as_path()), + Workspace::Core(_) + ); + assert_eq!(workspace.prover(), prover_path); + assert_matches!( + Workspace::from(workspace.prover().as_path()), + Workspace::Prover(_) + ); + } } diff --git a/core/lib/utils/src/lib.rs b/core/lib/utils/src/lib.rs index 7f9304e3110c..92a1d7a0c470 100644 --- a/core/lib/utils/src/lib.rs +++ b/core/lib/utils/src/lib.rs @@ -2,7 +2,7 @@ pub mod bytecode; mod convert; -mod env; +pub mod env; pub mod http_with_retries; pub mod misc; pub mod panic_extractor; @@ -10,4 +10,4 @@ mod serde_wrappers; pub mod time; pub mod wait_for_tasks; -pub use self::{convert::*, env::*, misc::*, serde_wrappers::*}; +pub use self::{convert::*, misc::*, serde_wrappers::*}; diff --git a/core/lib/vm_utils/Cargo.toml b/core/lib/vm_executor/Cargo.toml similarity index 64% rename from core/lib/vm_utils/Cargo.toml rename to core/lib/vm_executor/Cargo.toml index cb12e7c8f673..089c2a9bcca7 100644 --- a/core/lib/vm_utils/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "zksync_vm_utils" -description = "ZKsync VM utilities" +name = "zksync_vm_executor" +description = "Implementations of ZKsync VM executors" version.workspace = true edition.workspace = true authors.workspace = true @@ -14,8 +14,12 @@ categories.workspace = true zksync_contracts.workspace = true zksync_dal.workspace = true zksync_types.workspace = true -zksync_vm_interface.workspace = true +zksync_multivm.workspace = true +zksync_utils.workspace = true +async-trait.workspace = true +once_cell.workspace = true tokio.workspace = true anyhow.workspace = true tracing.workspace = true +vise.workspace = true diff --git a/core/node/state_keeper/src/batch_executor/mod.rs b/core/lib/vm_executor/src/batch/executor.rs similarity index 53% rename from core/node/state_keeper/src/batch_executor/mod.rs rename to core/lib/vm_executor/src/batch/executor.rs index 235a8f581c82..6dc9354fd7db 100644 --- a/core/node/state_keeper/src/batch_executor/mod.rs +++ b/core/lib/vm_executor/src/batch/executor.rs @@ -1,82 +1,32 @@ -use std::{error::Error as StdError, fmt, sync::Arc}; +use std::{error::Error as StdError, sync::Arc}; use anyhow::Context as _; +use async_trait::async_trait; use tokio::{ sync::{mpsc, oneshot}, task::JoinHandle, }; use zksync_multivm::interface::{ - storage::StorageViewCache, Call, CompressedBytecodeInfo, FinishedL1Batch, Halt, L1BatchEnv, - L2BlockEnv, SystemEnv, VmExecutionResultAndLogs, + executor::BatchExecutor, + storage::{ReadStorage, StorageView}, + BatchTransactionExecutionResult, FinishedL1Batch, L2BlockEnv, }; -use zksync_state::OwnedStorage; use zksync_types::Transaction; -use crate::{ - metrics::{ExecutorCommand, EXECUTOR_METRICS}, - types::ExecutionMetricsForCriteria, -}; - -pub mod main_executor; -#[cfg(test)] -mod tests; - -/// Representation of a transaction executed in the virtual machine. -#[derive(Debug, Clone)] -pub enum TxExecutionResult { - /// Successful execution of the tx and the block tip dry run. 
- Success { - tx_result: Box, - tx_metrics: Box, - compressed_bytecodes: Vec, - call_tracer_result: Vec, - gas_remaining: u32, - }, - /// The VM rejected the tx for some reason. - RejectedByVm { reason: Halt }, - /// Bootloader gas limit is not enough to execute the tx. - BootloaderOutOfGasForTx, -} - -impl TxExecutionResult { - /// Returns a revert reason if either transaction was rejected or bootloader ran out of gas. - pub(super) fn err(&self) -> Option<&Halt> { - match self { - Self::Success { .. } => None, - Self::RejectedByVm { - reason: rejection_reason, - } => Some(rejection_reason), - Self::BootloaderOutOfGasForTx => Some(&Halt::BootloaderOutOfGas), - } - } -} - -/// An abstraction that allows us to create different kinds of batch executors. -/// The only requirement is to return a [`BatchExecutorHandle`], which does its work -/// by communicating with the externally initialized thread. -/// -/// This type is generic over the storage type accepted to create the VM instance, mostly for testing purposes. -pub trait BatchExecutor: 'static + Send + Sync + fmt::Debug { - fn init_batch( - &mut self, - storage: S, - l1_batch_params: L1BatchEnv, - system_env: SystemEnv, - ) -> BatchExecutorHandle; -} +use super::metrics::{ExecutorCommand, EXECUTOR_METRICS}; #[derive(Debug)] -enum HandleOrError { - Handle(JoinHandle>), +enum HandleOrError { + Handle(JoinHandle>>), Err(Arc), } -impl HandleOrError { +impl HandleOrError { async fn wait_for_error(&mut self) -> anyhow::Error { let err_arc = match self { Self::Handle(handle) => { let err = match handle.await { - Ok(Ok(())) => anyhow::anyhow!("batch executor unexpectedly stopped"), + Ok(Ok(_)) => anyhow::anyhow!("batch executor unexpectedly stopped"), Ok(Err(err)) => err, Err(err) => anyhow::Error::new(err).context("batch executor panicked"), }; @@ -90,7 +40,7 @@ impl HandleOrError { anyhow::Error::new(err_arc) } - async fn wait(self) -> anyhow::Result<()> { + async fn wait(self) -> anyhow::Result> { match self { Self::Handle(handle) => handle.await.context("batch executor panicked")?, Self::Err(err_arc) => Err(anyhow::Error::new(err_arc)), @@ -98,21 +48,16 @@ impl HandleOrError { } } -/// A public interface for interaction with the `BatchExecutor`. -/// `BatchExecutorHandle` is stored in the state keeper and is used to invoke or rollback transactions, and also seal -/// the batches. +/// "Main" [`BatchExecutor`] implementation instantiating a VM in a blocking Tokio thread. #[derive(Debug)] -pub struct BatchExecutorHandle { - handle: HandleOrError, +pub struct MainBatchExecutor { + handle: HandleOrError, commands: mpsc::Sender, } -impl BatchExecutorHandle { - /// Creates a batch executor handle from the provided sender and thread join handle. - /// Can be used to inject an alternative batch executor implementation. 
- #[doc(hidden)] - pub(super) fn from_raw( - handle: JoinHandle>, +impl MainBatchExecutor { + pub(super) fn new( + handle: JoinHandle>>, commands: mpsc::Sender, ) -> Self { Self { @@ -120,9 +65,18 @@ impl BatchExecutorHandle { commands, } } +} +#[async_trait] +impl BatchExecutor for MainBatchExecutor +where + S: ReadStorage + Send + 'static, +{ #[tracing::instrument(skip_all)] - pub async fn execute_tx(&mut self, tx: Transaction) -> anyhow::Result { + async fn execute_tx( + &mut self, + tx: Transaction, + ) -> anyhow::Result { let tx_gas_limit = tx.gas_limit().as_u64(); let (response_sender, response_receiver) = oneshot::channel(); @@ -144,9 +98,9 @@ impl BatchExecutorHandle { }; let elapsed = latency.observe(); - if let TxExecutionResult::Success { tx_metrics, .. } = &res { - let gas_per_nanosecond = tx_metrics.execution_metrics.computational_gas_used as f64 - / elapsed.as_nanos() as f64; + if !res.tx_result.result.is_failed() { + let gas_per_nanosecond = + res.tx_result.statistics.computational_gas_used as f64 / elapsed.as_nanos() as f64; EXECUTOR_METRICS .computational_gas_per_nanosecond .observe(gas_per_nanosecond); @@ -162,13 +116,13 @@ impl BatchExecutorHandle { } #[tracing::instrument(skip_all)] - pub async fn start_next_l2_block(&mut self, env: L2BlockEnv) -> anyhow::Result<()> { + async fn rollback_last_tx(&mut self) -> anyhow::Result<()> { // While we don't get anything from the channel, it's useful to have it as a confirmation that the operation // indeed has been processed. let (response_sender, response_receiver) = oneshot::channel(); let send_failed = self .commands - .send(Command::StartNextL2Block(env, response_sender)) + .send(Command::RollbackLastTx(response_sender)) .await .is_err(); if send_failed { @@ -176,7 +130,7 @@ impl BatchExecutorHandle { } let latency = EXECUTOR_METRICS.batch_executor_command_response_time - [&ExecutorCommand::StartNextL2Block] + [&ExecutorCommand::RollbackLastTx] .start(); if response_receiver.await.is_err() { return Err(self.handle.wait_for_error().await); @@ -186,13 +140,13 @@ impl BatchExecutorHandle { } #[tracing::instrument(skip_all)] - pub async fn rollback_last_tx(&mut self) -> anyhow::Result<()> { + async fn start_next_l2_block(&mut self, env: L2BlockEnv) -> anyhow::Result<()> { // While we don't get anything from the channel, it's useful to have it as a confirmation that the operation // indeed has been processed. 
let (response_sender, response_receiver) = oneshot::channel(); let send_failed = self .commands - .send(Command::RollbackLastTx(response_sender)) + .send(Command::StartNextL2Block(env, response_sender)) .await .is_err(); if send_failed { @@ -200,7 +154,7 @@ impl BatchExecutorHandle { } let latency = EXECUTOR_METRICS.batch_executor_command_response_time - [&ExecutorCommand::RollbackLastTx] + [&ExecutorCommand::StartNextL2Block] .start(); if response_receiver.await.is_err() { return Err(self.handle.wait_for_error().await); @@ -210,7 +164,9 @@ impl BatchExecutorHandle { } #[tracing::instrument(skip_all)] - pub async fn finish_batch(mut self) -> anyhow::Result { + async fn finish_batch( + mut self: Box, + ) -> anyhow::Result<(FinishedL1Batch, StorageView)> { let (response_sender, response_receiver) = oneshot::channel(); let send_failed = self .commands @@ -228,44 +184,19 @@ impl BatchExecutorHandle { Ok(batch) => batch, Err(_) => return Err(self.handle.wait_for_error().await), }; - self.handle.wait().await?; - latency.observe(); - Ok(finished_batch) - } - - pub async fn finish_batch_with_cache( - mut self, - ) -> anyhow::Result<(FinishedL1Batch, StorageViewCache)> { - let (response_sender, response_receiver) = oneshot::channel(); - let send_failed = self - .commands - .send(Command::FinishBatchWithCache(response_sender)) - .await - .is_err(); - if send_failed { - return Err(self.handle.wait_for_error().await); - } - - let latency = EXECUTOR_METRICS.batch_executor_command_response_time - [&ExecutorCommand::FinishBatchWithCache] - .start(); - let batch_with_cache = match response_receiver.await { - Ok(batch_with_cache) => batch_with_cache, - Err(_) => return Err(self.handle.wait_for_error().await), - }; - - self.handle.wait().await?; - latency.observe(); - Ok(batch_with_cache) + let storage_view = self.handle.wait().await?; + Ok((finished_batch, storage_view)) } } #[derive(Debug)] pub(super) enum Command { - ExecuteTx(Box, oneshot::Sender), + ExecuteTx( + Box, + oneshot::Sender, + ), StartNextL2Block(L2BlockEnv, oneshot::Sender<()>), RollbackLastTx(oneshot::Sender<()>), FinishBatch(oneshot::Sender), - FinishBatchWithCache(oneshot::Sender<(FinishedL1Batch, StorageViewCache)>), } diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/lib/vm_executor/src/batch/factory.rs similarity index 65% rename from core/node/state_keeper/src/batch_executor/main_executor.rs rename to core/lib/vm_executor/src/batch/factory.rs index db4daeb77444..68a3769ee622 100644 --- a/core/node/state_keeper/src/batch_executor/main_executor.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -1,31 +1,31 @@ -use std::sync::Arc; +use std::{marker::PhantomData, rc::Rc, sync::Arc}; use anyhow::Context as _; use once_cell::sync::OnceCell; use tokio::sync::mpsc; use zksync_multivm::{ interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, storage::{ReadStorage, StorageView}, - Call, CompressedBytecodeInfo, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, - L2BlockEnv, SystemEnv, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, + BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, + L2BlockEnv, SystemEnv, VmInterface, VmInterfaceHistoryEnabled, }, tracers::CallTracer, vm_latest::HistoryEnabled, MultiVMTracer, VmInstance, }; -use zksync_shared_metrics::{InteractionType, TxStage, APP_METRICS}; use zksync_types::{vm::FastVmMode, Transaction}; -use super::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}; -use crate::{ +use super::{ + 
executor::{Command, MainBatchExecutor}, metrics::{TxExecutionStage, BATCH_TIP_METRICS, EXECUTOR_METRICS, KEEPER_METRICS}, - types::ExecutionMetricsForCriteria, }; +use crate::shared::InteractionType; -/// The default implementation of [`BatchExecutor`]. -/// Creates a "real" batch executor which maintains the VM (as opposed to the test builder which doesn't use the VM). +/// The default implementation of [`BatchExecutorFactory`]. +/// Creates real batch executors which maintain the VM (as opposed to the test factories which don't use the VM). #[derive(Debug, Clone)] -pub struct MainBatchExecutor { +pub struct MainBatchExecutorFactory { save_call_traces: bool, /// Whether batch executor would allow transactions with bytecode that cannot be compressed. /// For new blocks, bytecode compression is mandatory -- if bytecode compression is not supported, @@ -37,7 +37,7 @@ pub struct MainBatchExecutor { fast_vm_mode: FastVmMode, } -impl MainBatchExecutor { +impl MainBatchExecutorFactory { pub fn new(save_call_traces: bool, optional_bytecode_compression: bool) -> Self { Self { save_call_traces, @@ -56,13 +56,13 @@ impl MainBatchExecutor { } } -impl BatchExecutor for MainBatchExecutor { +impl BatchExecutorFactory for MainBatchExecutorFactory { fn init_batch( &mut self, storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, - ) -> BatchExecutorHandle { + ) -> Box> { // Since we process `BatchExecutor` commands one-by-one (the next command is never enqueued // until a previous command is processed), capacity 1 is enough for the commands channel. let (commands_sender, commands_receiver) = mpsc::channel(1); @@ -71,21 +71,15 @@ impl BatchExecutor for MainBatchExecutor { optional_bytecode_compression: self.optional_bytecode_compression, fast_vm_mode: self.fast_vm_mode, commands: commands_receiver, + _storage: PhantomData, }; let handle = tokio::task::spawn_blocking(move || executor.run(storage, l1_batch_params, system_env)); - BatchExecutorHandle::from_raw(handle, commands_sender) + Box::new(MainBatchExecutor::new(handle, commands_sender)) } } -#[derive(Debug)] -struct TransactionOutput { - tx_result: VmExecutionResultAndLogs, - compressed_bytecodes: Vec, - calls: Vec, -} - /// Implementation of the "primary" (non-test) batch executor. /// Upon launch, it initializes the VM object with provided block context and properties, and keeps invoking the commands /// sent to it one by one until the batch is finished. @@ -93,20 +87,21 @@ struct TransactionOutput { /// One `CommandReceiver` can execute exactly one batch, so once the batch is sealed, a new `CommandReceiver` object must /// be constructed. 
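The doc comment above pins down the command-loop contract: a `CommandReceiver` serves exactly one batch and processes commands strictly one at a time over a bounded channel, on a blocking thread. A toy, self-contained sketch of the same pattern (with a hypothetical one-variant `Command`, not the real one) shows why a capacity-1 channel suffices:

```rust
use tokio::sync::{mpsc, oneshot};

/// Toy command mirroring the executor's request/response shape.
enum Command {
    Add(u64, oneshot::Sender<u64>),
}

#[tokio::main]
async fn main() {
    // Capacity 1 suffices because the caller never enqueues a new command
    // before awaiting the response to the previous one.
    let (tx, mut rx) = mpsc::channel::<Command>(1);

    // Like the batch executor, the receiver runs on a blocking thread.
    let handle = tokio::task::spawn_blocking(move || {
        let mut total = 0;
        while let Some(Command::Add(x, resp)) = rx.blocking_recv() {
            total += x;
            // A dropped response receiver means the handle side went away.
            if resp.send(total).is_err() {
                break;
            }
        }
        total
    });

    let (resp_tx, resp_rx) = oneshot::channel();
    tx.send(Command::Add(40, resp_tx)).await.unwrap();
    assert_eq!(resp_rx.await.unwrap(), 40);

    drop(tx); // closing the channel lets the blocking task exit
    assert_eq!(handle.await.unwrap(), 40);
}
```

The real executor returns its `StorageView` from the blocking task on exit in the same way this toy returns `total`.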
#[derive(Debug)] -struct CommandReceiver { +struct CommandReceiver { save_call_traces: bool, optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, commands: mpsc::Receiver, + _storage: PhantomData, } -impl CommandReceiver { - pub(super) fn run( +impl CommandReceiver { + pub(super) fn run( mut self, storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, - ) -> anyhow::Result<()> { + ) -> anyhow::Result> { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); let storage_view = StorageView::new(storage).to_rc_ptr(); @@ -116,13 +111,15 @@ impl CommandReceiver { storage_view.clone(), self.fast_vm_mode, ); + let mut batch_finished = false; while let Some(cmd) = self.commands.blocking_recv() { match cmd { Command::ExecuteTx(tx, resp) => { - let result = self - .execute_tx(&tx, &mut vm) - .with_context(|| format!("fatal error executing transaction {tx:?}"))?; + let tx_hash = tx.hash(); + let result = self.execute_tx(*tx, &mut vm).with_context(|| { + format!("fatal error executing transaction {tx_hash:?}") + })?; if resp.send(result).is_err() { break; } @@ -144,36 +141,34 @@ impl CommandReceiver { if resp.send(vm_block_result).is_err() { break; } - - // `storage_view` cannot be accessed while borrowed by the VM, - // so this is the only point at which storage metrics can be obtained - let metrics = storage_view.as_ref().borrow_mut().metrics(); - EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::GetValue] - .observe(metrics.time_spent_on_get_value); - EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::SetValue] - .observe(metrics.time_spent_on_set_value); - return Ok(()); - } - Command::FinishBatchWithCache(resp) => { - let vm_block_result = self.finish_batch(&mut vm)?; - let cache = (*storage_view).borrow().cache(); - if resp.send((vm_block_result, cache)).is_err() { - break; - } - return Ok(()); + batch_finished = true; + break; } } } - // State keeper can exit because of stop signal, so it's OK to exit mid-batch. - tracing::info!("State keeper exited with an unfinished L1 batch"); - Ok(()) + + drop(vm); + let storage_view = Rc::into_inner(storage_view) + .context("storage view leaked")? + .into_inner(); + if batch_finished { + let metrics = storage_view.metrics(); + EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::GetValue] + .observe(metrics.time_spent_on_get_value); + EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::SetValue] + .observe(metrics.time_spent_on_set_value); + } else { + // State keeper can exit because of stop signal, so it's OK to exit mid-batch. + tracing::info!("State keeper exited with an unfinished L1 batch"); + } + Ok(storage_view) } - fn execute_tx( + fn execute_tx( &self, - tx: &Transaction, + transaction: Transaction, vm: &mut VmInstance, - ) -> anyhow::Result { + ) -> anyhow::Result { // Executing a next transaction means that a previous transaction was either rolled back (in which case its snapshot // was already removed), or that we build on top of it (in which case, it can be removed now). vm.pop_snapshot_no_rollback(); @@ -182,47 +177,23 @@ impl CommandReceiver { // Execute the transaction. let latency = KEEPER_METRICS.tx_execution_time[&TxExecutionStage::Execution].start(); - let output = if self.optional_bytecode_compression { - self.execute_tx_in_vm_with_optional_compression(tx, vm)? + let result = if self.optional_bytecode_compression { + self.execute_tx_in_vm_with_optional_compression(&transaction, vm)? 
} else { - self.execute_tx_in_vm(tx, vm)? + self.execute_tx_in_vm(&transaction, vm)? }; latency.observe(); - APP_METRICS.processed_txs[&TxStage::StateKeeper].inc(); - APP_METRICS.processed_l1_txs[&TxStage::StateKeeper].inc_by(tx.is_l1().into()); - let TransactionOutput { - tx_result, - compressed_bytecodes, - calls, - } = output; - - if let ExecutionResult::Halt { reason } = tx_result.result { - return Ok(match reason { - Halt::BootloaderOutOfGas => TxExecutionResult::BootloaderOutOfGasForTx, - _ => TxExecutionResult::RejectedByVm { reason }, - }); - } - - let tx_metrics = ExecutionMetricsForCriteria::new(Some(tx), &tx_result); - let gas_remaining = vm.gas_remaining(); - - Ok(TxExecutionResult::Success { - tx_result: Box::new(tx_result), - tx_metrics: Box::new(tx_metrics), - compressed_bytecodes, - call_tracer_result: calls, - gas_remaining, - }) + Ok(result) } - fn rollback_last_tx(&self, vm: &mut VmInstance) { + fn rollback_last_tx(&self, vm: &mut VmInstance) { let latency = KEEPER_METRICS.tx_execution_time[&TxExecutionStage::TxRollback].start(); vm.rollback_to_the_latest_snapshot(); latency.observe(); } - fn start_next_l2_block( + fn start_next_l2_block( &self, l2_block_env: L2BlockEnv, vm: &mut VmInstance, @@ -230,7 +201,7 @@ impl CommandReceiver { vm.start_new_l2_block(l2_block_env); } - fn finish_batch( + fn finish_batch( &self, vm: &mut VmInstance, ) -> anyhow::Result { @@ -249,11 +220,11 @@ impl CommandReceiver { /// Attempts to execute transaction with or without bytecode compression. /// If compression fails, the transaction will be re-executed without compression. - fn execute_tx_in_vm_with_optional_compression( + fn execute_tx_in_vm_with_optional_compression( &self, tx: &Transaction, vm: &mut VmInstance, - ) -> anyhow::Result { + ) -> anyhow::Result { // Note, that the space where we can put the calldata for compressing transactions // is limited and the transactions do not pay for taking it. // In order to not let the accounts spam the space of compressed bytecodes with bytecodes @@ -270,19 +241,17 @@ impl CommandReceiver { vec![] }; - if let (Ok(()), tx_result) = + if let (Ok(compressed_bytecodes), tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true) { - let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); - - let calls = Arc::try_unwrap(call_tracer_result) + let call_traces = Arc::try_unwrap(call_tracer_result) .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? .take() .unwrap_or_default(); - return Ok(TransactionOutput { - tx_result, - compressed_bytecodes, - calls, + return Ok(BatchTransactionExecutionResult { + tx_result: Box::new(tx_result), + compressed_bytecodes: compressed_bytecodes.into_owned(), + call_traces, }); } @@ -300,29 +269,30 @@ impl CommandReceiver { let (compression_result, tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), false); - compression_result.context("compression failed when it wasn't applied")?; - let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); + let compressed_bytecodes = compression_result + .context("compression failed when it wasn't applied")? + .into_owned(); // TODO implement tracer manager which will be responsible // for collecting result from all tracers and save it to the database - let calls = Arc::try_unwrap(call_tracer_result) + let call_traces = Arc::try_unwrap(call_tracer_result) .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? 
.take() .unwrap_or_default(); - Ok(TransactionOutput { - tx_result, + Ok(BatchTransactionExecutionResult { + tx_result: Box::new(tx_result), compressed_bytecodes, - calls, + call_traces, }) } /// Attempts to execute transaction with mandatory bytecode compression. /// If bytecode compression fails, the transaction will be rejected. - fn execute_tx_in_vm( + fn execute_tx_in_vm( &self, tx: &Transaction, vm: &mut VmInstance, - ) -> anyhow::Result { + ) -> anyhow::Result { let call_tracer_result = Arc::new(OnceCell::default()); let tracer = if self.save_call_traces { vec![CallTracer::new(call_tracer_result.clone()).into_tracer_pointer()] @@ -330,28 +300,27 @@ impl CommandReceiver { vec![] }; - let (published_bytecodes, mut tx_result) = + let (bytecodes_result, mut tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true); - if published_bytecodes.is_ok() { - let compressed_bytecodes = vm.get_last_tx_compressed_bytecodes(); - let calls = Arc::try_unwrap(call_tracer_result) + if let Ok(compressed_bytecodes) = bytecodes_result { + let call_traces = Arc::try_unwrap(call_tracer_result) .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? .take() .unwrap_or_default(); - Ok(TransactionOutput { - tx_result, - compressed_bytecodes, - calls, + Ok(BatchTransactionExecutionResult { + tx_result: Box::new(tx_result), + compressed_bytecodes: compressed_bytecodes.into_owned(), + call_traces, }) } else { // Transaction failed to publish bytecodes, we reject it so initiator doesn't pay fee. tx_result.result = ExecutionResult::Halt { reason: Halt::FailedToPublishCompressedBytecodes, }; - Ok(TransactionOutput { - tx_result, + Ok(BatchTransactionExecutionResult { + tx_result: Box::new(tx_result), compressed_bytecodes: vec![], - calls: vec![], + call_traces: vec![], }) } } diff --git a/core/lib/vm_executor/src/batch/metrics.rs b/core/lib/vm_executor/src/batch/metrics.rs new file mode 100644 index 000000000000..6851193e9be9 --- /dev/null +++ b/core/lib/vm_executor/src/batch/metrics.rs @@ -0,0 +1,90 @@ +//! Main batch executor metrics. + +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; +use zksync_multivm::interface::VmExecutionResultAndLogs; + +use crate::shared::InteractionType; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "command", rename_all = "snake_case")] +pub(super) enum ExecutorCommand { + ExecuteTx, + #[metrics(name = "start_next_miniblock")] + StartNextL2Block, + RollbackLastTx, + FinishBatch, +} + +const GAS_PER_NANOSECOND_BUCKETS: Buckets = Buckets::values(&[ + 0.01, 0.03, 0.1, 0.3, 0.5, 0.75, 1., 1.5, 3., 5., 10., 20., 50., +]); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "stage", rename_all = "snake_case")] +pub(super) enum TxExecutionStage { + Execution, + TxRollback, +} + +/// Executor-related metrics. +#[derive(Debug, Metrics)] +#[metrics(prefix = "state_keeper")] +pub(super) struct ExecutorMetrics { + /// Latency to process a single command sent to the batch executor. 
+ #[metrics(buckets = Buckets::LATENCIES)] + pub batch_executor_command_response_time: Family>, + #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] + pub computational_gas_per_nanosecond: Histogram, + #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] + pub failed_tx_gas_limit_per_nanosecond: Histogram, + /// Cumulative latency of interacting with the storage when executing a transaction + /// in the batch executor. + #[metrics(buckets = Buckets::LATENCIES)] + pub batch_storage_interaction_duration: Family>, +} + +#[vise::register] +pub(super) static EXECUTOR_METRICS: vise::Global = vise::Global::new(); + +/// Some more executor-related metrics with differing prefix. +#[derive(Debug, Metrics)] +#[metrics(prefix = "server_state_keeper")] +pub(super) struct StateKeeperMetrics { + /// Time spent by the state keeper on transaction execution. + #[metrics(buckets = Buckets::LATENCIES)] + pub tx_execution_time: Family>, +} + +#[vise::register] +pub(super) static KEEPER_METRICS: vise::Global = vise::Global::new(); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "batch_tip")] +pub(super) struct BatchTipMetrics { + #[metrics(buckets = Buckets::exponential(60000.0..=80000000.0, 2.0))] + gas_used: Histogram, + #[metrics(buckets = Buckets::exponential(1.0..=60000.0, 2.0))] + pubdata_published: Histogram, + #[metrics(buckets = Buckets::exponential(1.0..=4096.0, 2.0))] + circuit_statistic: Histogram, + #[metrics(buckets = Buckets::exponential(1.0..=4096.0, 2.0))] + execution_metrics_size: Histogram, +} + +impl BatchTipMetrics { + pub fn observe(&self, execution_result: &VmExecutionResultAndLogs) { + self.gas_used + .observe(execution_result.statistics.gas_used as usize); + self.pubdata_published + .observe(execution_result.statistics.pubdata_published as usize); + self.circuit_statistic + .observe(execution_result.statistics.circuit_statistic.total()); + self.execution_metrics_size + .observe(execution_result.get_execution_metrics(None).size()); + } +} + +#[vise::register] +pub(super) static BATCH_TIP_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/vm_executor/src/batch/mod.rs b/core/lib/vm_executor/src/batch/mod.rs new file mode 100644 index 000000000000..2407d2daba2c --- /dev/null +++ b/core/lib/vm_executor/src/batch/mod.rs @@ -0,0 +1,9 @@ +//! Main implementation of ZKsync VM [batch executor](crate::interface::BatchExecutor). +//! +//! This implementation is used by various ZKsync components, like the state keeper and components based on the VM runner. + +pub use self::{executor::MainBatchExecutor, factory::MainBatchExecutorFactory}; + +mod executor; +mod factory; +mod metrics; diff --git a/core/lib/vm_executor/src/lib.rs b/core/lib/vm_executor/src/lib.rs new file mode 100644 index 000000000000..1a0fbb002df9 --- /dev/null +++ b/core/lib/vm_executor/src/lib.rs @@ -0,0 +1,11 @@ +//! Implementations of ZKsync VM executors and executor-related utils. +//! +//! The included implementations are separated from the respective interfaces since they depend +//! on [VM implementations](zksync_multivm), are aware of ZKsync node storage etc. 
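Note the first item of the new `lib.rs` (opening the next hunk): re-exporting `zksync_multivm::interface::executor` as `interface` lets downstream code name the executor traits without importing `zksync_multivm` directly, e.g.:

```rust
// Hypothetical downstream import via the re-export.
use zksync_vm_executor::interface::{BatchExecutor, BatchExecutorFactory};
```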
+ +pub use zksync_multivm::interface::executor as interface; + +pub mod batch; +pub mod oneshot; +mod shared; +pub mod storage; diff --git a/core/lib/vm_executor/src/oneshot/metrics.rs b/core/lib/vm_executor/src/oneshot/metrics.rs new file mode 100644 index 000000000000..8a89ce0a9a4f --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/metrics.rs @@ -0,0 +1,143 @@ +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; +use zksync_multivm::interface::{storage::StorageViewMetrics, VmMemoryMetrics}; + +use crate::shared::InteractionType; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "type", rename_all = "snake_case")] +enum SizeType { + Inner, + History, +} + +const MEMORY_SIZE_BUCKETS: Buckets = Buckets::values(&[ + 1_000.0, + 10_000.0, + 100_000.0, + 500_000.0, + 1_000_000.0, + 5_000_000.0, + 10_000_000.0, + 50_000_000.0, + 100_000_000.0, + 500_000_000.0, + 1_000_000_000.0, +]); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "runtime_context_memory")] +struct RuntimeContextMemoryMetrics { + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + event_sink_size: Family>, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + memory_size: Family>, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + decommitter_size: Family>, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + storage_size: Family>, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + storage_view_cache_size: Histogram, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + full: Histogram, +} + +#[vise::register] +static MEMORY_METRICS: vise::Global = vise::Global::new(); + +const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "runtime_context_storage_interaction")] +struct RuntimeContextStorageMetrics { + #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)] + amount: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + duration: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + duration_per_unit: Family>, + #[metrics(buckets = Buckets::ZERO_TO_ONE)] + ratio: Histogram, +} + +#[vise::register] +static STORAGE_METRICS: vise::Global = vise::Global::new(); + +pub(super) fn report_vm_memory_metrics( + tx_id: &str, + memory_metrics: &VmMemoryMetrics, + vm_execution_took: Duration, + storage_metrics: StorageViewMetrics, +) { + MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); + MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); + MEMORY_METRICS.memory_size[&SizeType::Inner].observe(memory_metrics.memory_inner); + MEMORY_METRICS.memory_size[&SizeType::History].observe(memory_metrics.memory_history); + MEMORY_METRICS.decommitter_size[&SizeType::Inner] + .observe(memory_metrics.decommittment_processor_inner); + MEMORY_METRICS.decommitter_size[&SizeType::History] + .observe(memory_metrics.decommittment_processor_history); + MEMORY_METRICS.storage_size[&SizeType::Inner].observe(memory_metrics.storage_inner); + MEMORY_METRICS.storage_size[&SizeType::History].observe(memory_metrics.storage_history); + + MEMORY_METRICS + .storage_view_cache_size + .observe(storage_metrics.cache_size); + MEMORY_METRICS + .full + .observe(memory_metrics.full_size() + storage_metrics.cache_size); + + let total_storage_invocations = storage_metrics.get_value_storage_invocations + + storage_metrics.set_value_storage_invocations; + let total_time_spent_in_storage = + storage_metrics.time_spent_on_get_value + 
storage_metrics.time_spent_on_set_value; + + STORAGE_METRICS.amount[&InteractionType::Missed] + .observe(storage_metrics.storage_invocations_missed); + STORAGE_METRICS.amount[&InteractionType::GetValue] + .observe(storage_metrics.get_value_storage_invocations); + STORAGE_METRICS.amount[&InteractionType::SetValue] + .observe(storage_metrics.set_value_storage_invocations); + STORAGE_METRICS.amount[&InteractionType::Total].observe(total_storage_invocations); + + STORAGE_METRICS.duration[&InteractionType::Missed] + .observe(storage_metrics.time_spent_on_storage_missed); + STORAGE_METRICS.duration[&InteractionType::GetValue] + .observe(storage_metrics.time_spent_on_get_value); + STORAGE_METRICS.duration[&InteractionType::SetValue] + .observe(storage_metrics.time_spent_on_set_value); + STORAGE_METRICS.duration[&InteractionType::Total].observe(total_time_spent_in_storage); + + if total_storage_invocations > 0 { + STORAGE_METRICS.duration_per_unit[&InteractionType::Total] + .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64)); + } + if storage_metrics.storage_invocations_missed > 0 { + let duration_per_unit = storage_metrics + .time_spent_on_storage_missed + .div_f64(storage_metrics.storage_invocations_missed as f64); + STORAGE_METRICS.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit); + } + + STORAGE_METRICS + .ratio + .observe(total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64()); + + const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; + + if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { + tracing::info!( + "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ + {} get_value_storage_invocations, {} set_value_storage_invocations, \ + vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \ + (missed: {:?} get: {:?} set: {:?})", + storage_metrics.storage_invocations_missed, + storage_metrics.get_value_storage_invocations, + storage_metrics.set_value_storage_invocations, + storage_metrics.time_spent_on_storage_missed, + storage_metrics.time_spent_on_get_value, + storage_metrics.time_spent_on_set_value, + ); + } +} diff --git a/core/lib/vm_executor/src/oneshot/mock.rs b/core/lib/vm_executor/src/oneshot/mock.rs new file mode 100644 index 000000000000..8f3a12603c1a --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/mock.rs @@ -0,0 +1,136 @@ +use std::fmt; + +use async_trait::async_trait; +use zksync_multivm::interface::{ + executor::{OneshotExecutor, TransactionValidator}, + storage::ReadStorage, + tracer::{ValidationError, ValidationParams}, + ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, + TxExecutionArgs, TxExecutionMode, VmExecutionResultAndLogs, +}; +use zksync_types::{l2::L2Tx, Transaction}; + +type TxResponseFn = dyn Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + Send + Sync; + +/// Mock [`OneshotExecutor`] implementation. 
+pub struct MockOneshotExecutor { + call_responses: Box, + tx_responses: Box, +} + +impl fmt::Debug for MockOneshotExecutor { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("MockTransactionExecutor") + .finish_non_exhaustive() + } +} + +impl Default for MockOneshotExecutor { + fn default() -> Self { + Self { + call_responses: Box::new(|tx, _| { + panic!("Unexpected call with data {:?}", tx.execute.calldata()); + }), + tx_responses: Box::new(|tx, _| { + panic!("Unexpect transaction call: {tx:?}"); + }), + } + } +} + +impl MockOneshotExecutor { + /// Sets call response closure used by this executor. + pub fn set_call_responses(&mut self, responses: F) + where + F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, + { + self.call_responses = self.wrap_responses(responses); + } + + /// Sets transaction response closure used by this executor. The closure will be called both for transaction execution / validation, + /// and for gas estimation. + pub fn set_tx_responses(&mut self, responses: F) + where + F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, + { + self.tx_responses = self.wrap_responses(responses); + } + + fn wrap_responses(&mut self, responses: F) -> Box + where + F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, + { + Box::new( + move |tx: &Transaction, env: &OneshotEnv| -> VmExecutionResultAndLogs { + VmExecutionResultAndLogs { + result: responses(tx, env), + logs: Default::default(), + statistics: Default::default(), + refunds: Default::default(), + } + }, + ) + } + + /// Same as [`Self::set_tx_responses()`], but allows to customize returned VM logs etc. + pub fn set_full_tx_responses(&mut self, responses: F) + where + F: Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + 'static + Send + Sync, + { + self.tx_responses = Box::new(responses); + } + + fn mock_inspect(&self, env: OneshotEnv, args: TxExecutionArgs) -> VmExecutionResultAndLogs { + match env.system.execution_mode { + TxExecutionMode::EthCall => (self.call_responses)(&args.transaction, &env), + TxExecutionMode::VerifyExecute | TxExecutionMode::EstimateFee => { + (self.tx_responses)(&args.transaction, &env) + } + } + } +} + +#[async_trait] +impl OneshotExecutor for MockOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + async fn inspect_transaction_with_bytecode_compression( + &self, + _storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + _params: OneshotTracingParams, + ) -> anyhow::Result { + Ok(OneshotTransactionExecutionResult { + tx_result: Box::new(self.mock_inspect(env, args)), + compression_result: Ok(()), + call_traces: vec![], + }) + } +} + +#[async_trait] +impl TransactionValidator for MockOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + async fn validate_transaction( + &self, + _storage: S, + env: OneshotEnv, + tx: L2Tx, + _validation_params: ValidationParams, + ) -> anyhow::Result> { + Ok( + match self + .mock_inspect(env, TxExecutionArgs::for_validation(tx)) + .result + { + ExecutionResult::Halt { reason } => Err(ValidationError::FailedTx(reason)), + ExecutionResult::Success { .. } | ExecutionResult::Revert { .. } => Ok(()), + }, + ) + } +} diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs new file mode 100644 index 000000000000..cac8edfdfdf8 --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -0,0 +1,291 @@ +//! Oneshot VM executor. 
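Since `MockOneshotExecutor` is configured entirely through closures, test setup stays short. A minimal sketch, with one assumption not shown in this hunk: that `ExecutionResult::Success` carries an `output: Vec<u8>` payload:

```rust
use zksync_multivm::interface::ExecutionResult;
use zksync_vm_executor::oneshot::MockOneshotExecutor;

fn mock_executor_for_tests() -> MockOneshotExecutor {
    let mut executor = MockOneshotExecutor::default();
    // All transactions (execution, validation, gas estimation) succeed with
    // empty output; `eth_call`s keep the default panicking response.
    executor.set_tx_responses(|_tx, _env| ExecutionResult::Success { output: vec![] });
    executor
}
```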
+ +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +use anyhow::Context; +use async_trait::async_trait; +use once_cell::sync::OnceCell; +use zksync_multivm::{ + interface::{ + executor::{OneshotExecutor, TransactionValidator}, + storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, + tracer::{ValidationError, ValidationParams}, + ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, + StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, VmInterface, + }, + tracers::{CallTracer, StorageInvocations, ValidationTracer}, + utils::adjust_pubdata_price_for_tx, + vm_latest::HistoryDisabled, + zk_evm_latest::ethereum_types::U256, + MultiVMTracer, VmInstance, +}; +use zksync_types::{ + block::pack_block_info, + get_nonce_key, + l2::L2Tx, + utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, + AccountTreeId, Nonce, StorageKey, Transaction, SYSTEM_CONTEXT_ADDRESS, + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +pub use self::mock::MockOneshotExecutor; + +mod metrics; +mod mock; + +/// Main [`OneshotExecutor`] implementation used by the API server. +#[derive(Debug, Default)] +pub struct MainOneshotExecutor { + missed_storage_invocation_limit: usize, + execution_latency_histogram: Option<&'static vise::Histogram>, +} + +impl MainOneshotExecutor { + /// Creates a new executor with the specified limit of cache misses for storage read operations (an anti-DoS measure). + /// The limit is applied for calls and gas estimations, but not during transaction validation. + pub fn new(missed_storage_invocation_limit: usize) -> Self { + Self { + missed_storage_invocation_limit, + execution_latency_histogram: None, + } + } + + /// Sets a histogram for measuring VM execution latency. 
+ pub fn set_execution_latency_histogram( + &mut self, + histogram: &'static vise::Histogram, + ) { + self.execution_latency_histogram = Some(histogram); + } +} + +#[async_trait] +impl OneshotExecutor for MainOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + async fn inspect_transaction_with_bytecode_compression( + &self, + storage: S, + env: OneshotEnv, + args: TxExecutionArgs, + params: OneshotTracingParams, + ) -> anyhow::Result { + let missed_storage_invocation_limit = match env.system.execution_mode { + // storage accesses are not limited for tx validation + TxExecutionMode::VerifyExecute => usize::MAX, + TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { + self.missed_storage_invocation_limit + } + }; + let execution_latency_histogram = self.execution_latency_histogram; + + tokio::task::spawn_blocking(move || { + let mut tracers = vec![]; + let mut calls_result = Arc::>::default(); + if params.trace_calls { + tracers.push(CallTracer::new(calls_result.clone()).into_tracer_pointer()); + } + tracers.push( + StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer(), + ); + + let executor = VmSandbox::new(storage, env, args, execution_latency_histogram); + let mut result = executor.apply(|vm, transaction| { + let (compression_result, tx_result) = vm + .inspect_transaction_with_bytecode_compression( + tracers.into(), + transaction, + true, + ); + OneshotTransactionExecutionResult { + tx_result: Box::new(tx_result), + compression_result: compression_result.map(drop), + call_traces: vec![], + } + }); + + result.call_traces = Arc::make_mut(&mut calls_result).take().unwrap_or_default(); + result + }) + .await + .context("VM execution panicked") + } +} + +#[async_trait] +impl TransactionValidator for MainOneshotExecutor +where + S: ReadStorage + Send + 'static, +{ + async fn validate_transaction( + &self, + storage: S, + env: OneshotEnv, + tx: L2Tx, + validation_params: ValidationParams, + ) -> anyhow::Result> { + anyhow::ensure!( + env.system.execution_mode == TxExecutionMode::VerifyExecute, + "Unexpected execution mode for tx validation: {:?} (expected `VerifyExecute`)", + env.system.execution_mode + ); + let execution_latency_histogram = self.execution_latency_histogram; + + tokio::task::spawn_blocking(move || { + let (validation_tracer, mut validation_result) = + ValidationTracer::::new( + validation_params, + env.system.version.into(), + ); + let tracers = vec![validation_tracer.into_tracer_pointer()]; + + let executor = VmSandbox::new( + storage, + env, + TxExecutionArgs::for_validation(tx), + execution_latency_histogram, + ); + let exec_result = executor.apply(|vm, transaction| { + vm.push_transaction(transaction); + vm.inspect(tracers.into(), VmExecutionMode::OneTx) + }); + let validation_result = Arc::make_mut(&mut validation_result) + .take() + .map_or(Ok(()), Err); + + match (exec_result.result, validation_result) { + (_, Err(violated_rule)) => Err(ValidationError::ViolatedRule(violated_rule)), + (ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)), + _ => Ok(()), + } + }) + .await + .context("VM execution panicked") + } +} + +#[derive(Debug)] +struct VmSandbox { + vm: Box>, + storage_view: StoragePtr>, + transaction: Transaction, + execution_latency_histogram: Option<&'static vise::Histogram>, +} + +impl VmSandbox { + /// This method is blocking. 
+ fn new( + storage: S, + mut env: OneshotEnv, + execution_args: TxExecutionArgs, + execution_latency_histogram: Option<&'static vise::Histogram>, + ) -> Self { + let mut storage_view = StorageView::new(storage); + Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); + + let protocol_version = env.system.version; + if execution_args.adjust_pubdata_price { + env.l1_batch.fee_input = adjust_pubdata_price_for_tx( + env.l1_batch.fee_input, + execution_args.transaction.gas_per_pubdata_byte_limit(), + env.l1_batch.enforced_base_fee.map(U256::from), + protocol_version.into(), + ); + }; + + let storage_view = storage_view.to_rc_ptr(); + let vm = Box::new(VmInstance::new_with_specific_version( + env.l1_batch, + env.system, + storage_view.clone(), + protocol_version.into_api_vm_version(), + )); + + Self { + vm, + storage_view, + transaction: execution_args.transaction, + execution_latency_histogram, + } + } + + /// This method is blocking. + fn setup_storage_view( + storage_view: &mut StorageView, + execution_args: &TxExecutionArgs, + current_block: Option, + ) { + let storage_view_setup_started_at = Instant::now(); + if let Some(nonce) = execution_args.enforced_nonce { + let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); + let full_nonce = storage_view.read_value(&nonce_key); + let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); + let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); + storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + } + + let payer = execution_args.transaction.payer(); + let balance_key = storage_key_for_eth_balance(&payer); + let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); + current_balance += execution_args.added_balance; + storage_view.set_value(balance_key, u256_to_h256(current_balance)); + + // Reset L2 block info if necessary. + if let Some(current_block) = current_block { + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let l2_block_info = + pack_block_info(current_block.number.into(), current_block.timestamp); + storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); + + let l2_block_txs_rolling_hash_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + storage_view.set_value( + l2_block_txs_rolling_hash_key, + current_block.txs_rolling_hash, + ); + } + + let storage_view_setup_time = storage_view_setup_started_at.elapsed(); + // We don't want to emit too many logs. 
+ if storage_view_setup_time > Duration::from_millis(10) { + tracing::debug!("Prepared the storage view (took {storage_view_setup_time:?})",); + } + } + + pub(super) fn apply(mut self, apply_fn: F) -> T + where + F: FnOnce(&mut VmInstance, Transaction) -> T, + { + let tx_id = format!( + "{:?}-{}", + self.transaction.initiator_account(), + self.transaction.nonce().unwrap_or(Nonce(0)) + ); + + let started_at = Instant::now(); + let result = apply_fn(&mut *self.vm, self.transaction); + let vm_execution_took = started_at.elapsed(); + + if let Some(histogram) = self.execution_latency_histogram { + histogram.observe(vm_execution_took); + } + let memory_metrics = self.vm.record_vm_memory_metrics(); + metrics::report_vm_memory_metrics( + &tx_id, + &memory_metrics, + vm_execution_took, + self.storage_view.as_ref().borrow_mut().metrics(), + ); + result + } +} diff --git a/core/lib/vm_executor/src/shared.rs b/core/lib/vm_executor/src/shared.rs new file mode 100644 index 000000000000..420005be05d6 --- /dev/null +++ b/core/lib/vm_executor/src/shared.rs @@ -0,0 +1,12 @@ +//! Functionality shared among different types of executors. + +use vise::{EncodeLabelSet, EncodeLabelValue}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "interaction", rename_all = "snake_case")] +pub(crate) enum InteractionType { + Missed, + GetValue, + SetValue, + Total, +} diff --git a/core/lib/vm_utils/src/storage.rs b/core/lib/vm_executor/src/storage.rs similarity index 98% rename from core/lib/vm_utils/src/storage.rs rename to core/lib/vm_executor/src/storage.rs index 1e43543bc5aa..e39748786a30 100644 --- a/core/lib/vm_utils/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -1,13 +1,15 @@ +//! Utils to get data for L1 batch execution from storage. + use std::time::{Duration, Instant}; use anyhow::Context; use zksync_contracts::BaseSystemContracts; use zksync_dal::{Connection, Core, CoreDal, DalError}; +use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_types::{ block::L2BlockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, }; -use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; const BATCH_COMPUTATIONAL_GAS_LIMIT: u32 = u32::MAX; diff --git a/core/lib/vm_interface/Cargo.toml b/core/lib/vm_interface/Cargo.toml index 1d4efe06634b..694576dca3b0 100644 --- a/core/lib/vm_interface/Cargo.toml +++ b/core/lib/vm_interface/Cargo.toml @@ -15,7 +15,13 @@ zksync_contracts.workspace = true zksync_system_constants.workspace = true zksync_types.workspace = true +anyhow.workspace = true +async-trait.workspace = true hex.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true + +[dev-dependencies] +assert_matches.workspace = true +serde_json.workspace = true diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs new file mode 100644 index 000000000000..119f975fecd5 --- /dev/null +++ b/core/lib/vm_interface/src/executor.rs @@ -0,0 +1,72 @@ +//! High-level executor traits. 
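For contrast with the mock, wiring up the main oneshot executor is a one-liner. A sketch; the limit value here is illustrative, not a recommended setting:

```rust
use zksync_vm_executor::oneshot::MainOneshotExecutor;

fn build_oneshot_executor() -> MainOneshotExecutor {
    // Caps storage cache misses for calls and gas estimation (an anti-DoS
    // measure); transaction validation is exempt, as noted above.
    MainOneshotExecutor::new(10_000)
}
```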
+
+use std::fmt;
+
+use async_trait::async_trait;
+use zksync_types::{l2::L2Tx, Transaction};
+
+use crate::{
+    storage::{ReadStorage, StorageView},
+    tracer::{ValidationError, ValidationParams},
+    BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, OneshotEnv,
+    OneshotTracingParams, OneshotTransactionExecutionResult, SystemEnv, TxExecutionArgs,
+};
+
+/// Factory of [`BatchExecutor`]s.
+pub trait BatchExecutorFactory<S: ReadStorage>: 'static + Send + fmt::Debug {
+    /// Initializes an executor for a batch with the specified params and using the provided storage.
+    fn init_batch(
+        &mut self,
+        storage: S,
+        l1_batch_params: L1BatchEnv,
+        system_env: SystemEnv,
+    ) -> Box<dyn BatchExecutor<S>>;
+}
+
+/// Handle for executing a single L1 batch.
+///
+/// The handle is parameterized by the transaction execution output so that it can represent different
+/// levels of abstraction.
+#[async_trait]
+pub trait BatchExecutor<S>: 'static + Send + fmt::Debug {
+    /// Executes a transaction.
+    async fn execute_tx(
+        &mut self,
+        tx: Transaction,
+    ) -> anyhow::Result<BatchTransactionExecutionResult>;
+
+    /// Rolls back the last executed transaction.
+    async fn rollback_last_tx(&mut self) -> anyhow::Result<()>;
+
+    /// Starts the next L2 block with the specified params.
+    async fn start_next_l2_block(&mut self, env: L2BlockEnv) -> anyhow::Result<()>;
+
+    /// Finishes the current L1 batch.
+    async fn finish_batch(self: Box<Self>) -> anyhow::Result<(FinishedL1Batch, StorageView<S>)>;
+}
+
+/// VM executor capable of executing isolated transactions / calls (as opposed to [batch execution](BatchExecutor)).
+#[async_trait]
+pub trait OneshotExecutor<S: ReadStorage> {
+    /// Executes a transaction or call with optional tracers.
+    async fn inspect_transaction_with_bytecode_compression(
+        &self,
+        storage: S,
+        env: OneshotEnv,
+        args: TxExecutionArgs,
+        tracing: OneshotTracingParams,
+    ) -> anyhow::Result<OneshotTransactionExecutionResult>;
+}
+
+/// VM executor capable of validating transactions.
+#[async_trait]
+pub trait TransactionValidator<S: ReadStorage>: OneshotExecutor<S> {
+    /// Validates the provided transaction.
+    async fn validate_transaction(
+        &self,
+        storage: S,
+        env: OneshotEnv,
+        tx: L2Tx,
+        validation_params: ValidationParams,
+    ) -> anyhow::Result<Result<(), ValidationError>>;
+}
diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs
index b2b7d6484dad..2b30f82e0ce5 100644
--- a/core/lib/vm_interface/src/lib.rs
+++ b/core/lib/vm_interface/src/lib.rs
@@ -20,22 +20,27 @@
 pub use crate::{
     types::{
         errors::{
-            BytecodeCompressionError, Halt, TxRevertReason, VmRevertReason,
-            VmRevertReasonParsingError,
+            BytecodeCompressionError, BytecodeCompressionResult, Halt, TxRevertReason,
+            VmRevertReason, VmRevertReasonParsingError,
+        },
+        inputs::{
+            L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, StoredL2BlockEnv, SystemEnv,
+            TxExecutionArgs, TxExecutionMode, VmExecutionMode,
         },
-        inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode},
         outputs::{
-            BootloaderMemory, Call, CallType, CircuitStatistic, CompressedBytecodeInfo,
-            CurrentExecutionState, DeduplicatedWritesMetrics, ExecutionResult, FinishedL1Batch,
-            L2Block, Refunds, TransactionExecutionMetrics, TransactionExecutionResult,
-            TxExecutionStatus, VmEvent, VmExecutionLogs, VmExecutionMetrics,
-            VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics,
+            BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic,
+            CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics,
+            ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, Refunds,
+            TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent,
+            VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics,
+            VmMemoryMetrics,
         },
         tracer,
     },
-    vm::{VmFactory, VmInterface, VmInterfaceHistoryEnabled},
+    vm::{VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled},
 };
 
+pub mod executor;
 pub mod storage;
 mod types;
 mod vm;
diff --git a/core/lib/vm_interface/src/storage/in_memory.rs b/core/lib/vm_interface/src/storage/in_memory.rs
index d4b5e57f1fa0..6a8b56433455 100644
--- a/core/lib/vm_interface/src/storage/in_memory.rs
+++ b/core/lib/vm_interface/src/storage/in_memory.rs
@@ -12,7 +12,7 @@ use super::ReadStorage;
 pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u32 = 270;
 
 /// In-memory storage.
-#[derive(Debug, Default, Clone)]
+#[derive(Debug, Default, Clone, PartialEq)]
 pub struct InMemoryStorage {
     state: HashMap<StorageKey, (StorageValue, u64)>,
     factory_deps: HashMap<H256, Vec<u8>>,
diff --git a/core/lib/vm_interface/src/storage/mod.rs b/core/lib/vm_interface/src/storage/mod.rs
index 96cc1f19862c..9b92ef8b7705 100644
--- a/core/lib/vm_interface/src/storage/mod.rs
+++ b/core/lib/vm_interface/src/storage/mod.rs
@@ -5,10 +5,12 @@ use zksync_types::{get_known_code_key, StorageKey, StorageValue, H256};
 pub use self::{
     // Note, that `test_infra` of the bootloader tests relies on this value to be exposed
     in_memory::{InMemoryStorage, IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID},
+    snapshot::{StorageSnapshot, StorageWithSnapshot},
     view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewMetrics},
 };
 
 mod in_memory;
+mod snapshot;
 mod view;
 
 /// Functionality to read from the VM storage.
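The executor traits above are object-safe, so a factory can hand out boxed batch executors. The following is a rough, hypothetical usage sketch of the batch-execution half of this API (the `run_batch` driver and its inputs are illustrative only; the import path mirrors the re-exports used elsewhere in this diff):

    use zksync_multivm::interface::{
        executor::{BatchExecutor, BatchExecutorFactory},
        FinishedL1Batch, L1BatchEnv, SystemEnv,
    };
    use zksync_types::Transaction;

    /// Hypothetical driver: executes all `txs` in a single L1 batch and seals it.
    async fn run_batch<S: Send + 'static>(
        factory: &mut dyn BatchExecutorFactory<S>,
        storage: S,
        l1_batch_env: L1BatchEnv,
        system_env: SystemEnv,
        txs: Vec<Transaction>,
    ) -> anyhow::Result<FinishedL1Batch> {
        let mut executor = factory.init_batch(storage, l1_batch_env, system_env);
        for tx in txs {
            let result = executor.execute_tx(tx).await?;
            if result.was_halted() {
                // A halted transaction cannot be included in the batch; undo it.
                executor.rollback_last_tx().await?;
            }
        }
        let (finished_batch, _storage_view) = executor.finish_batch().await?;
        Ok(finished_batch)
    }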
diff --git a/core/lib/vm_interface/src/storage/snapshot.rs b/core/lib/vm_interface/src/storage/snapshot.rs
new file mode 100644
index 000000000000..a0175ff478a3
--- /dev/null
+++ b/core/lib/vm_interface/src/storage/snapshot.rs
@@ -0,0 +1,189 @@
+use std::{collections::HashMap, fmt};
+
+use serde::{Deserialize, Serialize};
+use zksync_types::{web3, StorageKey, StorageValue, H256};
+
+use super::ReadStorage;
+
+/// Self-sufficient or almost self-sufficient storage snapshot for a particular VM execution (e.g., executing a single L1 batch).
+///
+/// `StorageSnapshot` works somewhat similarly to [`InMemoryStorage`](super::InMemoryStorage), but has different semantics
+/// and use cases. `InMemoryStorage` is intended to be a modifiable storage to be used primarily in tests / benchmarks.
+/// In contrast, `StorageSnapshot` cannot be modified once created and is intended to represent a complete or almost complete snapshot
+/// for a particular VM execution. It can serve as a preloaded cache for a certain [`ReadStorage`] implementation
+/// that significantly reduces the number of storage accesses.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct StorageSnapshot {
+    // `Option` encompasses the entire map value for more efficient serialization
+    storage: HashMap<H256, Option<(H256, u64)>>,
+    // `Bytes` are used to have efficient serialization
+    factory_deps: HashMap<H256, web3::Bytes>,
+}
+
+impl StorageSnapshot {
+    /// Creates a new storage snapshot.
+    ///
+    /// # Arguments
+    ///
+    /// - `storage` should contain all storage slots accessed during VM execution, i.e. protective reads + initial / repeated writes
+    ///   for batch execution, keyed by the hashed storage key. `None` map values correspond to accessed slots without an assigned enum index.
+    ///   By definition, all these slots are guaranteed to have zero value.
+    pub fn new(
+        storage: HashMap<H256, Option<(H256, u64)>>,
+        factory_deps: HashMap<H256, Vec<u8>>,
+    ) -> Self {
+        Self {
+            storage,
+            factory_deps: factory_deps
+                .into_iter()
+                .map(|(hash, bytecode)| (hash, web3::Bytes(bytecode)))
+                .collect(),
+        }
+    }
+
+    /// Creates a [`ReadStorage`] implementation based on this snapshot and the provided fallback implementation.
+    /// The fallback will be called for storage slots / factory deps not in this snapshot (which, if this snapshot
+    /// is reasonably constructed, would be a rare occurrence). If the `shadow` flag is set, the fallback will be
+    /// consulted for *every* operation; this obviously harms performance and is mostly useful for testing.
+    ///
+    /// The caller is responsible for ensuring that the fallback actually corresponds to the snapshot.
+    pub fn with_fallback<S: ReadStorage>(
+        self,
+        fallback: S,
+        shadow: bool,
+    ) -> StorageWithSnapshot<S> {
+        StorageWithSnapshot {
+            snapshot: self,
+            fallback,
+            shadow,
+        }
+    }
+}
+
+/// [`StorageSnapshot`] wrapper implementing the [`ReadStorage`] trait. Created using [`with_fallback()`](StorageSnapshot::with_fallback()).
+///
+/// # Why fallback?
+///
+/// The reason we require a fallback is that it may be difficult to create a 100%-complete snapshot in the general case.
+/// E.g., for batch execution, the data is mostly present in Postgres (provided that protective reads are recorded),
+/// but in some scenarios, accessed slots may be not recorded anywhere (e.g., if a slot is written to and then reverted in the same block).
+/// In practice, there are on the order of 10 such slots for a mainnet batch with ~5,000 transactions / ~35,000 accessed slots;
+/// i.e., snapshots can still provide a significant speed-up.
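+///
+/// # Example
+///
+/// A sketch of the intended wiring; the snapshot contents and the `postgres` fallback are assumed
+/// to be obtained elsewhere:
+///
+/// ```ignore
+/// let snapshot = StorageSnapshot::new(accessed_slots, factory_deps);
+/// // `shadow == false`: the fallback is only queried on snapshot misses.
+/// let mut storage = snapshot.with_fallback(postgres, false);
+/// let value = storage.read_value(&key);
+/// ```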
+#[derive(Debug)]
+pub struct StorageWithSnapshot<S> {
+    snapshot: StorageSnapshot,
+    fallback: S,
+    shadow: bool,
+}
+
+impl<S: ReadStorage> StorageWithSnapshot<S> {
+    fn fallback<T: fmt::Debug + PartialEq>(
+        &mut self,
+        operation: fmt::Arguments<'_>,
+        value: Option<T>,
+        f: impl FnOnce(&mut S) -> T,
+    ) -> T {
+        if let Some(value) = value {
+            if self.shadow {
+                let fallback_value = f(&mut self.fallback);
+                assert_eq!(value, fallback_value, "mismatch in {operation} output");
+            }
+            return value;
+        }
+        tracing::trace!("Output for {operation} is missing in snapshot");
+        f(&mut self.fallback)
+    }
+}
+
+impl<S: ReadStorage> ReadStorage for StorageWithSnapshot<S> {
+    fn read_value(&mut self, key: &StorageKey) -> StorageValue {
+        let value = self
+            .snapshot
+            .storage
+            .get(&key.hashed_key())
+            .map(|entry| entry.unwrap_or_default().0);
+        self.fallback(format_args!("read_value({key:?})"), value, |storage| {
+            storage.read_value(key)
+        })
+    }
+
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
+        let is_initial = self
+            .snapshot
+            .storage
+            .get(&key.hashed_key())
+            .map(Option::is_none);
+        self.fallback(
+            format_args!("is_write_initial({key:?})"),
+            is_initial,
+            |storage| storage.is_write_initial(key),
+        )
+    }
+
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
+        let dep = self
+            .snapshot
+            .factory_deps
+            .get(&hash)
+            .map(|dep| Some(dep.0.clone()));
+        self.fallback(format_args!("load_factory_dep({hash})"), dep, |storage| {
+            storage.load_factory_dep(hash)
+        })
+    }
+
+    fn get_enumeration_index(&mut self, key: &StorageKey) -> Option<u64> {
+        let enum_index = self
+            .snapshot
+            .storage
+            .get(&key.hashed_key())
+            .map(|entry| entry.map(|(_, idx)| idx));
+        self.fallback(
+            format_args!("get_enumeration_index({key:?})"),
+            enum_index,
+            |storage| storage.get_enumeration_index(key),
+        )
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn serializing_snapshot_to_json() {
+        let snapshot = StorageSnapshot::new(
+            HashMap::from([
+                (H256::repeat_byte(1), Some((H256::from_low_u64_be(1), 10))),
+                (
+                    H256::repeat_byte(0x23),
+                    Some((H256::from_low_u64_be(100), 100)),
+                ),
+                (H256::repeat_byte(0xff), None),
+            ]),
+            HashMap::from([(H256::repeat_byte(2), (0..32).collect())]),
+        );
+        let expected_json = serde_json::json!({
+            "storage": {
+                "0x0101010101010101010101010101010101010101010101010101010101010101": [
+                    "0x0000000000000000000000000000000000000000000000000000000000000001",
+                    10,
+                ],
+                "0x2323232323232323232323232323232323232323232323232323232323232323": [
+                    "0x0000000000000000000000000000000000000000000000000000000000000064",
+                    100,
+                ],
+                "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff": null,
+            },
+            "factory_deps": {
+                "0x0202020202020202020202020202020202020202020202020202020202020202":
+                    "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+            },
+        });
+        let actual_json = serde_json::to_value(&snapshot).unwrap();
+        assert_eq!(actual_json, expected_json);
+
+        let restored: StorageSnapshot = serde_json::from_value(actual_json).unwrap();
+        assert_eq!(restored.storage, snapshot.storage);
+        assert_eq!(restored.factory_deps, snapshot.factory_deps);
+    }
+}
diff --git a/core/lib/vm_interface/src/storage/view.rs b/core/lib/vm_interface/src/storage/view.rs
index 691a9d442ca8..101f5c82f497 100644
--- a/core/lib/vm_interface/src/storage/view.rs
+++ b/core/lib/vm_interface/src/storage/view.rs
@@ -102,7 +102,7 @@ where
     }
 }
 
-impl StorageView {
+impl StorageView {
     /// Creates a new storage view based on the underlying storage.
     pub fn new(storage_handle: S) -> Self {
         Self {
diff --git a/core/lib/vm_interface/src/types/errors/bytecode_compression.rs b/core/lib/vm_interface/src/types/errors/bytecode_compression.rs
index 418be6b85733..c0c6e8737bbe 100644
--- a/core/lib/vm_interface/src/types/errors/bytecode_compression.rs
+++ b/core/lib/vm_interface/src/types/errors/bytecode_compression.rs
@@ -1,3 +1,7 @@
+use std::borrow::Cow;
+
+use crate::CompressedBytecodeInfo;
+
 /// Errors related to bytecode compression.
 #[derive(Debug, thiserror::Error)]
 #[non_exhaustive]
@@ -5,3 +9,7 @@ pub enum BytecodeCompressionError {
     #[error("Bytecode compression failed")]
     BytecodeCompressionFailed,
 }
+
+/// Result of compressing bytecodes used by a transaction.
+pub type BytecodeCompressionResult<'a> =
+    Result<Cow<'a, [CompressedBytecodeInfo]>, BytecodeCompressionError>;
diff --git a/core/lib/vm_interface/src/types/errors/mod.rs b/core/lib/vm_interface/src/types/errors/mod.rs
index 070e7aa28427..a8b2df15c62b 100644
--- a/core/lib/vm_interface/src/types/errors/mod.rs
+++ b/core/lib/vm_interface/src/types/errors/mod.rs
@@ -1,6 +1,6 @@
 pub use self::{
     bootloader_error::BootloaderErrorCode,
-    bytecode_compression::BytecodeCompressionError,
+    bytecode_compression::{BytecodeCompressionError, BytecodeCompressionResult},
     halt::Halt,
     tx_revert_reason::TxRevertReason,
     vm_revert_reason::{VmRevertReason, VmRevertReasonParsingError},
diff --git a/core/lib/vm_interface/src/types/errors/vm_revert_reason.rs b/core/lib/vm_interface/src/types/errors/vm_revert_reason.rs
index d76b7d4ddb9f..25ca5ebfe34b 100644
--- a/core/lib/vm_interface/src/types/errors/vm_revert_reason.rs
+++ b/core/lib/vm_interface/src/types/errors/vm_revert_reason.rs
@@ -169,6 +169,8 @@ impl fmt::Display for VmRevertReason {
 
 #[cfg(test)]
 mod tests {
+    use assert_matches::assert_matches;
+
     use super::VmRevertReason;
 
     #[test]
@@ -204,7 +206,7 @@ mod tests {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
         ];
         let reason = VmRevertReason::try_from_bytes(msg.as_slice()).expect("Shouldn't be error");
-        assert!(matches!(reason, VmRevertReason::Unknown { .. }));
+        assert_matches!(reason, VmRevertReason::Unknown { .. });
     }
 
     #[test]
diff --git a/core/lib/vm_interface/src/types/inputs/l2_block.rs b/core/lib/vm_interface/src/types/inputs/l2_block.rs
index 7c9a028bbad7..b081dfbdeacc 100644
--- a/core/lib/vm_interface/src/types/inputs/l2_block.rs
+++ b/core/lib/vm_interface/src/types/inputs/l2_block.rs
@@ -10,12 +10,21 @@ pub struct L2BlockEnv {
 }
 
 impl L2BlockEnv {
-    pub fn from_l2_block_data(miniblock_execution_data: &L2BlockExecutionData) -> Self {
+    pub fn from_l2_block_data(execution_data: &L2BlockExecutionData) -> Self {
         Self {
-            number: miniblock_execution_data.number.0,
-            timestamp: miniblock_execution_data.timestamp,
-            prev_block_hash: miniblock_execution_data.prev_block_hash,
-            max_virtual_blocks_to_create: miniblock_execution_data.virtual_blocks,
+            number: execution_data.number.0,
+            timestamp: execution_data.timestamp,
+            prev_block_hash: execution_data.prev_block_hash,
+            max_virtual_blocks_to_create: execution_data.virtual_blocks,
         }
     }
 }
+
+/// Current block information stored in the system context contract. Can be used to set up
+/// oneshot transaction / call execution.
+#[derive(Debug, Clone, Copy)]
+pub struct StoredL2BlockEnv {
+    pub number: u32,
+    pub timestamp: u64,
+    pub txs_rolling_hash: H256,
+}
diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs
index 1d2c49cdfa11..24f58ae72f16 100644
--- a/core/lib/vm_interface/src/types/inputs/mod.rs
+++ b/core/lib/vm_interface/src/types/inputs/mod.rs
@@ -1,7 +1,11 @@
+use zksync_types::{
+    l2::L2Tx, ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256,
+};
+
 pub use self::{
     execution_mode::VmExecutionMode,
     l1_batch_env::L1BatchEnv,
-    l2_block::L2BlockEnv,
+    l2_block::{L2BlockEnv, StoredL2BlockEnv},
     system_env::{SystemEnv, TxExecutionMode},
 };
 
@@ -9,3 +13,83 @@ mod execution_mode;
 mod l1_batch_env;
 mod l2_block;
 mod system_env;
+
+/// Full environment for oneshot transaction / call execution.
+#[derive(Debug)]
+pub struct OneshotEnv {
+    /// System environment.
+    pub system: SystemEnv,
+    /// Part of the environment specific to an L1 batch.
+    pub l1_batch: L1BatchEnv,
+    /// Part of the environment representing the current L2 block. Can be used to override storage slots
+    /// in the system context contract, which are set from `L1BatchEnv.first_l2_block` by default.
+    pub current_block: Option<StoredL2BlockEnv>,
+}
+
+/// Executor-independent arguments necessary for oneshot transaction execution.
+///
+/// # Developer guidelines
+///
+/// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these
+/// are also provided to an executor.
+#[derive(Debug)]
+pub struct TxExecutionArgs {
+    /// Transaction / call itself.
+    pub transaction: Transaction,
+    /// Nonce override for the initiator account.
+    pub enforced_nonce: Option<Nonce>,
+    /// Balance added to the initiator account.
+    pub added_balance: U256,
+    /// If `true`, the batch's L1 / pubdata gas price will be adjusted so that the transaction's gas per pubdata limit
+    /// is <= the one in the block. This is often helpful in case we want the transaction validation to work regardless
+    /// of the current L1 prices for gas or pubdata.
+    pub adjust_pubdata_price: bool,
+}
+
+impl TxExecutionArgs {
+    pub fn for_validation(tx: L2Tx) -> Self {
+        Self {
+            enforced_nonce: Some(tx.nonce()),
+            added_balance: U256::zero(),
+            adjust_pubdata_price: true,
+            transaction: tx.into(),
+        }
+    }
+
+    pub fn for_eth_call(mut call: L2Tx) -> Self {
+        if call.common_data.signature.is_empty() {
+            call.common_data.signature = PackedEthSignature::default().serialize_packed().into();
+        }
+
+        Self {
+            enforced_nonce: None,
+            added_balance: U256::zero(),
+            adjust_pubdata_price: false,
+            transaction: call.into(),
+        }
+    }
+
+    pub fn for_gas_estimate(transaction: Transaction) -> Self {
+        // For L2 transactions, we need to explicitly put enough balance into the user's account,
+        // while for L1->L2 transactions the `to_mint` field plays this role.
+        let added_balance = match &transaction.common_data {
+            ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas,
+            ExecuteTransactionCommon::L1(_) => U256::zero(),
+            ExecuteTransactionCommon::ProtocolUpgrade(_) => U256::zero(),
+        };
+
+        Self {
+            enforced_nonce: transaction.nonce(),
+            added_balance,
+            adjust_pubdata_price: true,
+            transaction,
+        }
+    }
+}
+
+/// Inputs and outputs for all tracers supported for oneshot transaction / call execution.
+#[derive(Debug, Default)]
+pub struct OneshotTracingParams {
+    /// Whether to trace contract calls.
+    pub trace_calls: bool,
+}
diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs
index 37e122c6d9d9..6f9c02f0b587 100644
--- a/core/lib/vm_interface/src/types/outputs/execution_result.rs
+++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs
@@ -11,7 +11,8 @@ use zksync_types::{
 };
 
 use crate::{
-    CompressedBytecodeInfo, Halt, VmExecutionMetrics, VmExecutionStatistics, VmRevertReason,
+    BytecodeCompressionError, CompressedBytecodeInfo, Halt, VmExecutionMetrics,
+    VmExecutionStatistics, VmRevertReason,
 };
 
 const L1_MESSAGE_EVENT_SIGNATURE: H256 = H256([
@@ -297,6 +298,35 @@ impl Call {
     }
 }
 
+/// Mid-level transaction execution output returned by a [batch executor](crate::executor::BatchExecutor).
+#[derive(Debug, Clone)]
+pub struct BatchTransactionExecutionResult {
+    /// VM result.
+    pub tx_result: Box<VmExecutionResultAndLogs>,
+    /// Compressed bytecodes used by the transaction.
+    pub compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+    /// Call traces (if requested; otherwise, empty).
+    pub call_traces: Vec<Call>,
+}
+
+impl BatchTransactionExecutionResult {
+    pub fn was_halted(&self) -> bool {
+        matches!(self.tx_result.result, ExecutionResult::Halt { .. })
+    }
+}
+
+/// Mid-level transaction execution output returned by a [oneshot executor](crate::executor::OneshotExecutor).
+#[derive(Debug)]
+pub struct OneshotTransactionExecutionResult {
+    /// VM result.
+    pub tx_result: Box<VmExecutionResultAndLogs>,
+    /// Result of compressing bytecodes used by the transaction.
+    pub compression_result: Result<(), BytecodeCompressionError>,
+    /// Call traces (if requested; otherwise, empty).
+    pub call_traces: Vec<Call>,
+}
+
+/// High-level transaction execution result used by the API server sandbox etc.
 #[derive(Debug, Clone, PartialEq)]
 pub struct TransactionExecutionResult {
     pub transaction: Transaction,
diff --git a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs
index 9c0afc6659f0..27241c2c0fae 100644
--- a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs
+++ b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs
@@ -1,6 +1,7 @@
 use zksync_types::writes::StateDiffRecord;
 
 use super::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs};
+use crate::{ExecutionResult, Refunds, VmExecutionLogs, VmExecutionStatistics};
 
 /// State of the VM after the batch execution.
 #[derive(Debug, Clone)]
@@ -16,3 +17,28 @@ pub struct FinishedL1Batch {
     /// List of state diffs. Could be `None` for old versions of the VM.
     pub state_diffs: Option<Vec<StateDiffRecord>>,
 }
+
+impl FinishedL1Batch {
+    pub fn mock() -> Self {
+        FinishedL1Batch {
+            block_tip_execution_result: VmExecutionResultAndLogs {
+                result: ExecutionResult::Success { output: vec![] },
+                logs: VmExecutionLogs::default(),
+                statistics: VmExecutionStatistics::default(),
+                refunds: Refunds::default(),
+            },
+            final_execution_state: CurrentExecutionState {
+                events: vec![],
+                deduplicated_storage_logs: vec![],
+                used_contract_hashes: vec![],
+                user_l2_to_l1_logs: vec![],
+                system_logs: vec![],
+                storage_refunds: Vec::new(),
+                pubdata_costs: Vec::new(),
+            },
+            final_bootloader_memory: Some(vec![]),
+            pubdata_input: Some(vec![]),
+            state_diffs: Some(vec![]),
+        }
+    }
+}
diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs
index d24e1440f836..1fa1cd5d1688 100644
--- a/core/lib/vm_interface/src/types/outputs/mod.rs
+++ b/core/lib/vm_interface/src/types/outputs/mod.rs
@@ -1,7 +1,8 @@
 pub use self::{
     bytecode::CompressedBytecodeInfo,
     execution_result::{
-        Call, CallType, ExecutionResult, Refunds, TransactionExecutionResult, TxExecutionStatus,
+        BatchTransactionExecutionResult, Call, CallType, ExecutionResult,
+        OneshotTransactionExecutionResult, Refunds, TransactionExecutionResult, TxExecutionStatus,
         VmEvent, VmExecutionLogs, VmExecutionResultAndLogs,
     },
     execution_state::{BootloaderMemory, CurrentExecutionState},
diff --git a/core/lib/vm_interface/src/types/tracer.rs b/core/lib/vm_interface/src/types/tracer.rs
index 1b42b2eabbb3..ba07772c7f23 100644
--- a/core/lib/vm_interface/src/types/tracer.rs
+++ b/core/lib/vm_interface/src/types/tracer.rs
@@ -1,3 +1,7 @@
+use std::{collections::HashSet, fmt};
+
+use zksync_types::{Address, U256};
+
 use crate::Halt;
 
 #[derive(Debug, Clone, PartialEq)]
@@ -37,3 +41,78 @@ pub enum VmExecutionStopReason {
     VmFinished,
     TracerRequestedStop(TracerExecutionStopReason),
 }
+
+/// Transaction validation parameters.
+#[derive(Debug, Clone)]
+pub struct ValidationParams {
+    pub user_address: Address,
+    pub paymaster_address: Address,
+    /// Slots that are trusted (i.e., the user can access them).
+    pub trusted_slots: HashSet<(Address, U256)>,
+    /// Trusted addresses (the user can access any slots on these addresses).
+    pub trusted_addresses: HashSet<Address>,
+    /// Slots that are trusted and whose values are treated as new trusted addresses.
+    /// They are needed to work correctly with beacon proxies, where the address of the implementation
+    /// is stored in the beacon.
+    pub trusted_address_slots: HashSet<(Address, U256)>,
+    /// Amount of computational gas that the validation step is allowed to use.
+    pub computational_gas_limit: u32,
+}
+
+/// Rules that can be violated when validating a transaction.
+#[derive(Debug, Clone)]
+pub enum ViolatedValidationRule {
+    /// The transaction touched disallowed storage slots during validation.
+    TouchedDisallowedStorageSlots(Address, U256),
+    /// The transaction called a contract without attached bytecode.
+    CalledContractWithNoCode(Address),
+    /// The transaction touched a disallowed context.
+    TouchedDisallowedContext,
+    /// The transaction used too much gas during validation.
+    TookTooManyComputationalGas(u32),
+}
+
+impl fmt::Display for ViolatedValidationRule {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ViolatedValidationRule::TouchedDisallowedStorageSlots(contract, key) => write!(
+                f,
+                "Touched disallowed storage slots: address {contract:x}, key: {key:x}",
+            ),
+            ViolatedValidationRule::CalledContractWithNoCode(contract) => {
+                write!(f, "Called contract with no code: {contract:x}")
+            }
+            ViolatedValidationRule::TouchedDisallowedContext => {
+                write!(f, "Touched disallowed context")
+            }
+            ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => {
+                write!(
+                    f,
+                    "Took too much computational gas, allowed limit: {gas_limit}"
+                )
+            }
+        }
+    }
+}
+
+/// Errors returned when validating a transaction.
+#[derive(Debug)]
+pub enum ValidationError {
+    /// VM execution was halted during validation.
+    FailedTx(Halt),
+    /// Transaction violated one of the account validation rules.
+    ViolatedRule(ViolatedValidationRule),
+}
+
+impl fmt::Display for ValidationError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::FailedTx(revert_reason) => {
+                write!(f, "Validation revert: {}", revert_reason)
+            }
+            Self::ViolatedRule(rule) => {
+                write!(f, "Violated validation rules: {}", rule)
+            }
+        }
+    }
+}
diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs
index b8614a46c147..f70be52bd86a 100644
--- a/core/lib/vm_interface/src/vm.rs
+++ b/core/lib/vm_interface/src/vm.rs
@@ -14,9 +14,8 @@
 use zksync_types::Transaction;
 
 use crate::{
-    storage::StoragePtr, BootloaderMemory, BytecodeCompressionError, CompressedBytecodeInfo,
-    CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode,
-    VmExecutionResultAndLogs, VmMemoryMetrics,
+    storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv,
+    SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmMemoryMetrics,
 };
 
 pub trait VmInterface {
@@ -25,11 +24,6 @@ pub trait VmInterface {
     /// Push transaction to bootloader memory.
     fn push_transaction(&mut self, tx: Transaction);
 
-    /// Execute next VM step (either next transaction or bootloader or the whole batch).
-    fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs {
-        self.inspect(Self::TracerDispatcher::default(), execution_mode)
-    }
-
     /// Execute next VM step (either next transaction or bootloader or the whole batch)
     /// with custom tracers.
     fn inspect(
@@ -38,67 +32,48 @@ pub trait VmInterface {
         execution_mode: VmExecutionMode,
     ) -> VmExecutionResultAndLogs;
 
-    /// Get bootloader memory.
-    fn get_bootloader_memory(&self) -> BootloaderMemory;
-
-    /// Get last transaction's compressed bytecodes.
-    fn get_last_tx_compressed_bytecodes(&self) -> Vec<CompressedBytecodeInfo>;
-
     /// Start a new L2 block.
     fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv);
 
-    /// Get the current state of the virtual machine.
-    fn get_current_execution_state(&self) -> CurrentExecutionState;
-
-    /// Execute transaction with optional bytecode compression.
-    fn execute_transaction_with_bytecode_compression(
-        &mut self,
-        tx: Transaction,
-        with_compression: bool,
-    ) -> (
-        Result<(), BytecodeCompressionError>,
-        VmExecutionResultAndLogs,
-    ) {
-        self.inspect_transaction_with_bytecode_compression(
-            Self::TracerDispatcher::default(),
-            tx,
-            with_compression,
-        )
-    }
-
     /// Execute transaction with optional bytecode compression using custom tracers.
     fn inspect_transaction_with_bytecode_compression(
         &mut self,
         tracer: Self::TracerDispatcher,
         tx: Transaction,
         with_compression: bool,
-    ) -> (
-        Result<(), BytecodeCompressionError>,
-        VmExecutionResultAndLogs,
-    );
+    ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs);
 
     /// Record VM memory metrics.
     fn record_vm_memory_metrics(&self) -> VmMemoryMetrics;
 
-    /// How much gas is left in the current stack frame.
-    fn gas_remaining(&self) -> u32;
-
     /// Execute batch till the end and return the result, with final execution state
     /// and bootloader memory.
-    fn finish_batch(&mut self) -> FinishedL1Batch {
-        let result = self.execute(VmExecutionMode::Batch);
-        let execution_state = self.get_current_execution_state();
-        let bootloader_memory = self.get_bootloader_memory();
-        FinishedL1Batch {
-            block_tip_execution_result: result,
-            final_execution_state: execution_state,
-            final_bootloader_memory: Some(bootloader_memory),
-            pubdata_input: None,
-            state_diffs: None,
-        }
+    fn finish_batch(&mut self) -> FinishedL1Batch;
+}
+
+/// Extension trait for [`VmInterface`] that provides some additional methods.
+pub trait VmInterfaceExt: VmInterface {
+    /// Executes the next VM step (either next transaction or bootloader or the whole batch).
+    fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs {
+        self.inspect(Self::TracerDispatcher::default(), execution_mode)
+    }
+
+    /// Executes a transaction with optional bytecode compression.
+    fn execute_transaction_with_bytecode_compression(
+        &mut self,
+        tx: Transaction,
+        with_compression: bool,
+    ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) {
+        self.inspect_transaction_with_bytecode_compression(
+            Self::TracerDispatcher::default(),
+            tx,
+            with_compression,
+        )
     }
 }
 
+impl<T: VmInterface> VmInterfaceExt for T {}
+
 /// Encapsulates creating VM instance based on the provided environment.
 pub trait VmFactory<S>: VmInterface {
     /// Creates a new VM instance.
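Since `execute` and `execute_transaction_with_bytecode_compression` moved from `VmInterface` into the blanket-implemented `VmInterfaceExt`, existing call sites keep working once they import the extension trait. A hypothetical sketch (the helper function is illustrative; the import path mirrors the re-exports used elsewhere in this diff):

    use zksync_multivm::interface::{
        VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt,
    };
    use zksync_types::Transaction;

    // Any `VmInterface` implementor gets the helpers via the blanket impl,
    // so `vm.execute(...)` still compiles after this refactoring.
    fn execute_one_tx<V: VmInterface>(vm: &mut V, tx: Transaction) -> VmExecutionResultAndLogs {
        vm.push_transaction(tx);
        vm.execute(VmExecutionMode::OneTx)
    }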
diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs
deleted file mode 100644
index 30f61eb69f21..000000000000
--- a/core/lib/vm_utils/src/lib.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub mod storage;
diff --git a/core/lib/web3_decl/src/client/mod.rs b/core/lib/web3_decl/src/client/mod.rs
index a8246216eca3..7f0de4f3bca9 100644
--- a/core/lib/web3_decl/src/client/mod.rs
+++ b/core/lib/web3_decl/src/client/mod.rs
@@ -318,6 +318,7 @@ pub struct ClientBuilder {
     client: C,
     url: SensitiveUrl,
     rate_limit: (usize, Duration),
+    report_config: bool,
     network: Net,
 }
 
@@ -328,6 +329,7 @@ impl fmt::Debug for ClientBuilder {
             .field("client", &any::type_name::<C>())
             .field("url", &self.url)
             .field("rate_limit", &self.rate_limit)
+            .field("report_config", &self.report_config)
             .field("network", &self.network)
             .finish_non_exhaustive()
     }
 }
 
@@ -340,6 +342,7 @@ impl ClientBuilder {
             client,
             url,
             rate_limit: (1, Duration::ZERO),
+            report_config: true,
             network: Net::default(),
         }
     }
@@ -366,16 +369,25 @@ impl ClientBuilder {
         self
     }
 
+    /// Allows switching off config reporting for this client in logs and metrics. This is useful if a client is short-lived
+    /// and is not injected as a dependency.
+    pub fn report_config(mut self, report: bool) -> Self {
+        self.report_config = report;
+        self
+    }
+
     /// Builds the client.
     pub fn build(self) -> Client<Net, C> {
-        tracing::info!(
-            "Creating JSON-RPC client for network {:?} with inner client: {:?} and rate limit: {:?}",
-            self.network,
-            self.client,
-            self.rate_limit
-        );
         let rate_limit = SharedRateLimit::new(self.rate_limit.0, self.rate_limit.1);
-        METRICS.observe_config(self.network.metric_label(), &rate_limit);
+        if self.report_config {
+            tracing::info!(
+                "Creating JSON-RPC client for network {:?} with inner client: {:?} and rate limit: {:?}",
+                self.network,
+                self.client,
+                self.rate_limit
+            );
+            METRICS.observe_config(self.network.metric_label(), &rate_limit);
+        }
 
         Client {
             inner: self.client,
diff --git a/core/lib/web3_decl/src/error.rs b/core/lib/web3_decl/src/error.rs
index f42fe8de59d5..3aa16a9ab77c 100644
--- a/core/lib/web3_decl/src/error.rs
+++ b/core/lib/web3_decl/src/error.rs
@@ -60,6 +60,19 @@ pub struct EnrichedClientError {
     args: HashMap<&'static str, String>,
 }
 
+/// Whether the error should be considered retriable.
+pub fn is_retriable(err: &ClientError) -> bool {
+    match err {
+        ClientError::Transport(_) | ClientError::RequestTimeout => true,
+        ClientError::Call(err) => {
+            // At least some RPC providers use "internal error" in case of the server being overloaded
+            err.code() == ErrorCode::ServerIsBusy.code()
+                || err.code() == ErrorCode::InternalError.code()
+        }
+        _ => false,
+    }
+}
+
 /// Alias for a result with enriched client RPC error.
 pub type EnrichedClientResult<T> = Result<T, EnrichedClientError>;
 
@@ -87,15 +100,7 @@ impl EnrichedClientError {
     /// Whether the error should be considered retriable.
     pub fn is_retriable(&self) -> bool {
-        match self.as_ref() {
-            ClientError::Transport(_) | ClientError::RequestTimeout => true,
-            ClientError::Call(err) => {
-                // At least some RPC providers use "internal error" in case of the server being overloaded
-                err.code() == ErrorCode::ServerIsBusy.code()
-                    || err.code() == ErrorCode::InternalError.code()
-            }
-            _ => false,
-        }
+        is_retriable(&self.inner_error)
     }
 }
diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs
index dac774dd7bdf..8a4d2db8c6fe 100644
--- a/core/lib/web3_decl/src/namespaces/en.rs
+++ b/core/lib/web3_decl/src/namespaces/en.rs
@@ -25,6 +25,9 @@ pub trait EnNamespace {
     #[method(name = "consensusGenesis")]
     async fn consensus_genesis(&self) -> RpcResult<Option<en::ConsensusGenesis>>;
 
+    #[method(name = "consensusGlobalConfig")]
+    async fn consensus_global_config(&self) -> RpcResult<Option<en::ConsensusGlobalConfig>>;
+
     /// Lists all tokens created at or before the specified `block_number`.
     ///
     /// This method is used by EN after snapshot recovery in order to recover token records.
diff --git a/core/lib/zksync_core_leftovers/Cargo.toml b/core/lib/zksync_core_leftovers/Cargo.toml
index 4eab88234749..6aa6e6a8b43a 100644
--- a/core/lib/zksync_core_leftovers/Cargo.toml
+++ b/core/lib/zksync_core_leftovers/Cargo.toml
@@ -11,12 +11,10 @@ keywords.workspace = true
 categories.workspace = true
 
 [dependencies]
-zksync_dal.workspace = true
 zksync_config.workspace = true
 zksync_protobuf.workspace = true
 zksync_protobuf_config.workspace = true
 zksync_env_config.workspace = true
-zksync_node_genesis.workspace = true
 
 anyhow.workspace = true
 tokio = { workspace = true, features = ["time"] }
diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs
index 8224b03da071..2d6af705f482 100644
--- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs
+++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs
@@ -18,8 +18,8 @@ use zksync_config::{
         GeneralConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig,
         ProtectiveReadsWriterConfig, ProverJobMonitorConfig, PruningConfig, SnapshotRecoveryConfig,
     },
-    ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig,
-    EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig,
+    ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DAClientConfig, DADispatcherConfig,
+    DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig,
     ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig,
 };
 use zksync_env_config::FromEnv;
@@ -68,6 +68,7 @@ pub struct TempConfigStore {
     pub gas_adjuster_config: Option<GasAdjusterConfig>,
     pub observability: Option<ObservabilityConfig>,
     pub snapshot_creator: Option<SnapshotsCreatorConfig>,
+    pub da_client_config: Option<DAClientConfig>,
     pub da_dispatcher_config: Option<DADispatcherConfig>,
     pub protective_reads_writer_config: Option<ProtectiveReadsWriterConfig>,
     pub basic_witness_input_producer_config: Option<BasicWitnessInputProducerConfig>,
@@ -105,6 +106,7 @@ impl TempConfigStore {
             eth: self.eth_sender_config.clone(),
             snapshot_creator: self.snapshot_creator.clone(),
             observability: self.observability.clone(),
+            da_client_config: self.da_client_config.clone(),
             da_dispatcher_config: self.da_dispatcher_config.clone(),
             protective_reads_writer_config: self.protective_reads_writer_config.clone(),
             basic_witness_input_producer_config: self.basic_witness_input_producer_config.clone(),
@@ -188,6 +190,7 @@ fn load_env_config() -> anyhow::Result<TempConfigStore> {
         gas_adjuster_config: GasAdjusterConfig::from_env().ok(),
         observability: ObservabilityConfig::from_env().ok(),
         snapshot_creator:
SnapshotsCreatorConfig::from_env().ok(), + da_client_config: DAClientConfig::from_env().ok(), da_dispatcher_config: DADispatcherConfig::from_env().ok(), protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), basic_witness_input_producer_config: BasicWitnessInputProducerConfig::from_env().ok(), diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index f7d40210b485..040e2a94a110 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -29,6 +29,7 @@ zksync_utils.workspace = true zksync_protobuf.workspace = true zksync_mini_merkle_tree.workspace = true zksync_multivm.workspace = true +zksync_vm_executor.workspace = true vise.workspace = true anyhow.workspace = true diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index c0c8398f690d..0fbf8abc3dd4 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -10,15 +10,11 @@ use std::time::{Duration, Instant}; use anyhow::Context as _; use tokio::runtime::Handle; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; +use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::{ - interface::{ - storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, - L1BatchEnv, L2BlockEnv, SystemEnv, VmInterface, - }, - utils::adjust_pubdata_price_for_tx, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryDisabled}, - VmInstance, + interface::{L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv}, + utils::get_eth_call_gas_limit, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_state::PostgresStorage; use zksync_system_constants::{ @@ -26,393 +22,209 @@ use zksync_system_constants::{ SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ZKPORTER_IS_AVAILABLE, }; use zksync_types::{ - api::{self, state_override::StateOverride}, - block::{pack_block_info, unpack_block_info, L2BlockHasher}, + api, + block::{unpack_block_info, L2BlockHasher}, fee_model::BatchFeeInput, - get_nonce_key, - utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, - AccountTreeId, L1BatchNumber, L2BlockNumber, Nonce, ProtocolVersionId, StorageKey, Transaction, - H256, U256, + AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256, U256, }; -use zksync_utils::{h256_to_u256, time::seconds_since_epoch, u256_to_h256}; +use zksync_utils::{h256_to_u256, time::seconds_since_epoch}; use super::{ - storage::StorageWithOverrides, - vm_metrics::{self, SandboxStage, SANDBOX_METRICS}, - BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit, + vm_metrics::{SandboxStage, SANDBOX_METRICS}, + BlockArgs, TxSetupArgs, }; -type VmStorageView<'a> = StorageView>>; -type BoxedVm<'a> = Box>, HistoryDisabled>>; +pub(super) async fn prepare_env_and_storage( + mut connection: Connection<'static, Core>, + setup_args: TxSetupArgs, + block_args: &BlockArgs, +) -> anyhow::Result<(OneshotEnv, PostgresStorage<'static>)> { + let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); -#[derive(Debug)] -struct Sandbox<'a> { - system_env: SystemEnv, - l1_batch_env: L1BatchEnv, - execution_args: &'a TxExecutionArgs, - l2_block_info_to_reset: Option, - storage_view: VmStorageView<'a>, -} - -impl<'a> Sandbox<'a> { - async fn new( - mut connection: Connection<'a, Core>, - shared_args: TxSharedArgs, - execution_args: &'a TxExecutionArgs, - block_args: BlockArgs, 
- state_override: &StateOverride, - ) -> anyhow::Result> { - let resolve_started_at = Instant::now(); - let resolved_block_info = block_args - .resolve_block_info(&mut connection) - .await - .with_context(|| format!("cannot resolve block numbers for {block_args:?}"))?; - let resolve_time = resolve_started_at.elapsed(); - // We don't want to emit too many logs. - if resolve_time > Duration::from_millis(10) { - tracing::debug!("Resolved block numbers (took {resolve_time:?})"); - } - - if block_args.resolves_to_latest_sealed_l2_block() { - shared_args - .caches - .schedule_values_update(resolved_block_info.state_l2_block_number); - } - - let (next_l2_block_info, l2_block_info_to_reset) = Self::load_l2_block_info( - &mut connection, - block_args.is_pending_l2_block(), - &resolved_block_info, - ) - .await?; - - let storage = PostgresStorage::new_async( - Handle::current(), - connection, - resolved_block_info.state_l2_block_number, - false, - ) + let resolve_started_at = Instant::now(); + let resolved_block_info = block_args + .resolve_block_info(&mut connection) .await - .context("cannot create `PostgresStorage`")? - .with_caches(shared_args.caches.clone()); - - let storage_with_overrides = StorageWithOverrides::new(storage, state_override); - let storage_view = StorageView::new(storage_with_overrides); - let (system_env, l1_batch_env) = Self::prepare_env( - shared_args, - execution_args, - &resolved_block_info, - next_l2_block_info, - ); + .with_context(|| format!("cannot resolve block numbers for {block_args:?}"))?; + let resolve_time = resolve_started_at.elapsed(); + // We don't want to emit too many logs. + if resolve_time > Duration::from_millis(10) { + tracing::debug!("Resolved block numbers (took {resolve_time:?})"); + } - Ok(Self { - system_env, - l1_batch_env, - storage_view, - execution_args, - l2_block_info_to_reset, - }) + if block_args.resolves_to_latest_sealed_l2_block() { + setup_args + .caches + .schedule_values_update(resolved_block_info.state_l2_block_number); } - async fn load_l2_block_info( - connection: &mut Connection<'_, Core>, - is_pending_block: bool, - resolved_block_info: &ResolvedBlockInfo, - ) -> anyhow::Result<(L2BlockEnv, Option)> { - let mut l2_block_info_to_reset = None; - let current_l2_block_info = StoredL2BlockInfo::new( - connection, - resolved_block_info.state_l2_block_number, - Some(resolved_block_info.state_l2_block_hash), - ) + let (next_block, current_block) = load_l2_block_info( + &mut connection, + block_args.is_pending_l2_block(), + &resolved_block_info, + ) + .await?; + + let storage = PostgresStorage::new_async( + Handle::current(), + connection, + resolved_block_info.state_l2_block_number, + false, + ) + .await + .context("cannot create `PostgresStorage`")? 
+    .with_caches(setup_args.caches.clone());
+
+    let (system, l1_batch) = prepare_env(setup_args, &resolved_block_info, next_block);
+
+    let env = OneshotEnv {
+        system,
+        l1_batch,
+        current_block,
+    };
+    initialization_stage.observe();
+    Ok((env, storage))
+}
+
+async fn load_l2_block_info(
+    connection: &mut Connection<'_, Core>,
+    is_pending_block: bool,
+    resolved_block_info: &ResolvedBlockInfo,
+) -> anyhow::Result<(L2BlockEnv, Option<StoredL2BlockEnv>)> {
+    let mut current_block = None;
+    let next_block = read_stored_l2_block(connection, resolved_block_info.state_l2_block_number)
         .await
         .context("failed reading L2 block info")?;
 
-        let next_l2_block_info = if is_pending_block {
-            L2BlockEnv {
-                number: current_l2_block_info.l2_block_number + 1,
-                timestamp: resolved_block_info.l1_batch_timestamp,
-                prev_block_hash: current_l2_block_info.l2_block_hash,
-                // For simplicity, we assume each L2 block create one virtual block.
-                // This may be wrong only during transition period.
-                max_virtual_blocks_to_create: 1,
-            }
-        } else if current_l2_block_info.l2_block_number == 0 {
+    let next_block = if is_pending_block {
+        L2BlockEnv {
+            number: next_block.number + 1,
+            timestamp: resolved_block_info.l1_batch_timestamp,
+            prev_block_hash: resolved_block_info.state_l2_block_hash,
+            // For simplicity, we assume each L2 block creates one virtual block.
+            // This may be wrong only during the transition period.
+            max_virtual_blocks_to_create: 1,
+        }
+    } else if next_block.number == 0 {
+        // Special case:
+        // - For environments where the genesis block was created before the virtual block upgrade, it doesn't matter what we put here.
+        // - Otherwise, we need to put actual values here. We cannot create the next L2 block with block_number=0 and `max_virtual_blocks_to_create=0`
+        //   because of SystemContext requirements. But, due to intrinsics of SystemContext, block.number will still be resolved to 0.
+        L2BlockEnv {
+            number: 1,
+            timestamp: 0,
+            prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
+            max_virtual_blocks_to_create: 1,
+        }
+    } else {
+        // We need to reset L2 block info in storage to process transaction in the current block context.
+        // Actual resetting will be done after `storage_view` is created.
+ let prev_block_number = resolved_block_info.state_l2_block_number - 1; + let prev_l2_block = read_stored_l2_block(connection, prev_block_number) .await .context("failed reading previous L2 block info")?; - l2_block_info_to_reset = Some(prev_l2_block_info); - L2BlockEnv { - number: current_l2_block_info.l2_block_number, - timestamp: current_l2_block_info.l2_block_timestamp, - prev_block_hash: prev_l2_block_info.l2_block_hash, - max_virtual_blocks_to_create: 1, - } - }; - - Ok((next_l2_block_info, l2_block_info_to_reset)) - } - - /// This method is blocking. - fn setup_storage_view(&mut self, tx: &Transaction) { - let storage_view_setup_started_at = Instant::now(); - if let Some(nonce) = self.execution_args.enforced_nonce { - let nonce_key = get_nonce_key(&tx.initiator_account()); - let full_nonce = self.storage_view.read_value(&nonce_key); - let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); - let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - self.storage_view - .set_value(nonce_key, u256_to_h256(enforced_full_nonce)); - } - - let payer = tx.payer(); - let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(self.storage_view.read_value(&balance_key)); - current_balance += self.execution_args.added_balance; - self.storage_view - .set_value(balance_key, u256_to_h256(current_balance)); - - // Reset L2 block info if necessary. - if let Some(l2_block_info_to_reset) = self.l2_block_info_to_reset { - let l2_block_info_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let l2_block_info = pack_block_info( - l2_block_info_to_reset.l2_block_number as u64, - l2_block_info_to_reset.l2_block_timestamp, - ); - self.storage_view - .set_value(l2_block_info_key, u256_to_h256(l2_block_info)); - - let l2_block_txs_rolling_hash_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - self.storage_view.set_value( - l2_block_txs_rolling_hash_key, - l2_block_info_to_reset.txs_rolling_hash, - ); + let mut prev_block_hash = connection + .blocks_web3_dal() + .get_l2_block_hash(prev_block_number) + .await + .map_err(DalError::generalize)?; + if prev_block_hash.is_none() { + // We might need to load the previous block hash from the snapshot recovery metadata + let snapshot_recovery = connection + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await + .map_err(DalError::generalize)?; + prev_block_hash = snapshot_recovery.and_then(|recovery| { + (recovery.l2_block_number == prev_block_number).then_some(recovery.l2_block_hash) + }); } - let storage_view_setup_time = storage_view_setup_started_at.elapsed(); - // We don't want to emit too many logs. 
- if storage_view_setup_time > Duration::from_millis(10) { - tracing::debug!("Prepared the storage view (took {storage_view_setup_time:?})",); + current_block = Some(prev_l2_block); + L2BlockEnv { + number: next_block.number, + timestamp: next_block.timestamp, + prev_block_hash: prev_block_hash.with_context(|| { + format!("missing hash for previous L2 block #{prev_block_number}") + })?, + max_virtual_blocks_to_create: 1, } - } - - fn prepare_env( - shared_args: TxSharedArgs, - execution_args: &TxExecutionArgs, - resolved_block_info: &ResolvedBlockInfo, - next_l2_block_info: L2BlockEnv, - ) -> (SystemEnv, L1BatchEnv) { - let TxSharedArgs { - operator_account, - fee_input, - base_system_contracts, - validation_computational_gas_limit, - chain_id, - .. - } = shared_args; + }; - // In case we are executing in a past block, we'll use the historical fee data. - let fee_input = resolved_block_info - .historical_fee_input - .unwrap_or(fee_input); - let system_env = SystemEnv { - zk_porter_available: ZKPORTER_IS_AVAILABLE, - version: resolved_block_info.protocol_version, - base_system_smart_contracts: base_system_contracts - .get_by_protocol_version(resolved_block_info.protocol_version), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: execution_args.execution_mode, - default_validation_computational_gas_limit: validation_computational_gas_limit, - chain_id, - }; - let l1_batch_env = L1BatchEnv { - previous_batch_hash: None, - number: resolved_block_info.vm_l1_batch_number, - timestamp: resolved_block_info.l1_batch_timestamp, - fee_input, - fee_account: *operator_account.address(), - enforced_base_fee: execution_args.enforced_base_fee, - first_l2_block: next_l2_block_info, - }; - (system_env, l1_batch_env) - } - - /// This method is blocking. - fn into_vm( - mut self, - tx: &Transaction, - adjust_pubdata_price: bool, - ) -> (BoxedVm<'a>, StoragePtr>) { - self.setup_storage_view(tx); - let protocol_version = self.system_env.version; - if adjust_pubdata_price { - self.l1_batch_env.fee_input = adjust_pubdata_price_for_tx( - self.l1_batch_env.fee_input, - tx.gas_per_pubdata_byte_limit(), - self.l1_batch_env.enforced_base_fee.map(U256::from), - protocol_version.into(), - ); - }; - - let storage_view = self.storage_view.to_rc_ptr(); - let vm = Box::new(VmInstance::new_with_specific_version( - self.l1_batch_env, - self.system_env, - storage_view.clone(), - protocol_version.into_api_vm_version(), - )); - - (vm, storage_view) - } + Ok((next_block, current_block)) } -#[allow(clippy::too_many_arguments)] -pub(super) fn apply_vm_in_sandbox( - vm_permit: VmPermit, - shared_args: TxSharedArgs, - // If `true`, then the batch's L1/pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <= - // to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the - // current L1 prices for gas or pubdata. - adjust_pubdata_price: bool, - execution_args: &TxExecutionArgs, - connection_pool: &ConnectionPool, - tx: Transaction, - block_args: BlockArgs, // Block arguments for the transaction. 
-    state_override: Option<StateOverride>,
-    apply: impl FnOnce(
-        &mut VmInstance<StorageWithOverrides<PostgresStorage<'_>>, HistoryDisabled>,
-        Transaction,
-        ProtocolVersionId,
-    ) -> T,
-) -> anyhow::Result<T> {
-    let stage_started_at = Instant::now();
-    let span = tracing::debug_span!("initialization").entered();
-
-    let rt_handle = vm_permit.rt_handle();
-    let connection = rt_handle
-        .block_on(connection_pool.connection_tagged("api"))
-        .context("failed acquiring DB connection")?;
-    let connection_acquire_time = stage_started_at.elapsed();
-    // We don't want to emit too many logs.
-    if connection_acquire_time > Duration::from_millis(10) {
-        tracing::debug!("Obtained connection (took {connection_acquire_time:?})");
-    }
-
-    let sandbox = rt_handle.block_on(Sandbox::new(
-        connection,
-        shared_args,
-        execution_args,
-        block_args,
-        state_override.as_ref().unwrap_or(&StateOverride::default()),
-    ))?;
-    let protocol_version = sandbox.system_env.version;
-    let (mut vm, storage_view) = sandbox.into_vm(&tx, adjust_pubdata_price);
-
-    SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].observe(stage_started_at.elapsed());
-    span.exit();
+fn prepare_env(
+    setup_args: TxSetupArgs,
+    resolved_block_info: &ResolvedBlockInfo,
+    next_block: L2BlockEnv,
+) -> (SystemEnv, L1BatchEnv) {
+    let TxSetupArgs {
+        execution_mode,
+        operator_account,
+        fee_input,
+        base_system_contracts,
+        validation_computational_gas_limit,
+        chain_id,
+        enforced_base_fee,
+        ..
+    } = setup_args;
+
+    // In case we are executing in a past block, we'll use the historical fee data.
+    let fee_input = resolved_block_info
+        .historical_fee_input
+        .unwrap_or(fee_input);
+    let system_env = SystemEnv {
+        zk_porter_available: ZKPORTER_IS_AVAILABLE,
+        version: resolved_block_info.protocol_version,
+        base_system_smart_contracts: base_system_contracts
+            .get_by_protocol_version(resolved_block_info.protocol_version),
+        bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
+        execution_mode,
+        default_validation_computational_gas_limit: validation_computational_gas_limit,
+        chain_id,
+    };
+    let l1_batch_env = L1BatchEnv {
+        previous_batch_hash: None,
+        number: resolved_block_info.vm_l1_batch_number,
+        timestamp: resolved_block_info.l1_batch_timestamp,
+        fee_input,
+        fee_account: *operator_account.address(),
+        enforced_base_fee,
+        first_l2_block: next_block,
+    };
+    (system_env, l1_batch_env)
+}
 
-    let tx_id = format!(
-        "{:?}-{}",
-        tx.initiator_account(),
-        tx.nonce().unwrap_or(Nonce(0))
+async fn read_stored_l2_block(
+    connection: &mut Connection<'_, Core>,
+    l2_block_number: L2BlockNumber,
+) -> anyhow::Result<StoredL2BlockEnv> {
+    let l2_block_info_key = StorageKey::new(
+        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
+        SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
     );
+    let l2_block_info = connection
+        .storage_web3_dal()
+        .get_historical_value_unchecked(l2_block_info_key.hashed_key(), l2_block_number)
+        .await?;
+    let (l2_block_number_from_state, timestamp) = unpack_block_info(h256_to_u256(l2_block_info));
 
-    let execution_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Execution].start();
-    let result = apply(&mut vm, tx, protocol_version);
-    let vm_execution_took = execution_latency.observe();
-
-    let memory_metrics = vm.record_vm_memory_metrics();
-    vm_metrics::report_vm_memory_metrics(
-        &tx_id,
-        &memory_metrics,
-        vm_execution_took,
-        storage_view.as_ref().borrow_mut().metrics(),
+    let l2_block_txs_rolling_hash_key = StorageKey::new(
+        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
+        SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION,
     );
-    Ok(result)
-}
-
-#[derive(Debug, Clone, Copy)]
-struct StoredL2BlockInfo {
-    l2_block_number: u32,
-    l2_block_timestamp: u64,
-    l2_block_hash: H256,
-    txs_rolling_hash: H256,
-}
-
-impl StoredL2BlockInfo {
-    /// If `l2_block_hash` is `None`, it needs to be fetched from the storage.
-    async fn new(
-        connection: &mut Connection<'_, Core>,
-        l2_block_number: L2BlockNumber,
-        l2_block_hash: Option<H256>,
-    ) -> anyhow::Result<Self> {
-        let l2_block_info_key = StorageKey::new(
-            AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
-            SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
-        );
-        let l2_block_info = connection
-            .storage_web3_dal()
-            .get_historical_value_unchecked(l2_block_info_key.hashed_key(), l2_block_number)
-            .await
-            .context("failed reading L2 block info from VM state")?;
-        let (l2_block_number_from_state, l2_block_timestamp) =
-            unpack_block_info(h256_to_u256(l2_block_info));
-
-        let l2_block_txs_rolling_hash_key = StorageKey::new(
-            AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
-            SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION,
-        );
-        let txs_rolling_hash = connection
-            .storage_web3_dal()
-            .get_historical_value_unchecked(
-                l2_block_txs_rolling_hash_key.hashed_key(),
-                l2_block_number,
-            )
-            .await
-            .context("failed reading transaction rolling hash from VM state")?;
-
-        let l2_block_hash = if let Some(hash) = l2_block_hash {
-            hash
-        } else {
-            connection
-                .blocks_web3_dal()
-                .get_l2_block_hash(l2_block_number)
-                .await
-                .map_err(DalError::generalize)?
-                .with_context(|| format!("L2 block #{l2_block_number} not present in storage"))?
-        };
+    let txs_rolling_hash = connection
+        .storage_web3_dal()
+        .get_historical_value_unchecked(l2_block_txs_rolling_hash_key.hashed_key(), l2_block_number)
+        .await?;
 
-        Ok(Self {
-            l2_block_number: l2_block_number_from_state as u32,
-            l2_block_timestamp,
-            l2_block_hash,
-            txs_rolling_hash,
-        })
-    }
+    Ok(StoredL2BlockEnv {
+        number: l2_block_number_from_state as u32,
+        timestamp,
+        txs_rolling_hash,
+    })
 }
 
 #[derive(Debug)]
@@ -433,16 +245,19 @@ impl BlockArgs {
         )
     }
 
-    fn is_estimate_like(&self) -> bool {
-        matches!(
-            self.block_id,
-            api::BlockId::Number(api::BlockNumber::Pending)
-                | api::BlockId::Number(api::BlockNumber::Latest)
-                | api::BlockId::Number(api::BlockNumber::Committed)
-        )
+    pub(crate) async fn default_eth_call_gas(
+        &self,
+        connection: &mut Connection<'_, Core>,
+    ) -> anyhow::Result<U256> {
+        let protocol_version = self
+            .resolve_block_info(connection)
+            .await
+            .context("failed to resolve block info")?
+            .protocol_version;
+        Ok(get_eth_call_gas_limit(protocol_version.into()).into())
     }
 
-    pub(crate) async fn resolve_block_info(
+    async fn resolve_block_info(
         &self,
         connection: &mut Connection<'_, Core>,
     ) -> anyhow::Result {
@@ -483,7 +298,7 @@ impl BlockArgs {
             .context("resolved L2 block disappeared from storage")?
         };
 
-        let historical_fee_input = if !self.is_estimate_like() {
+        let historical_fee_input = if !self.resolves_to_latest_sealed_l2_block() {
             let l2_block_header = connection
                 .blocks_dal()
                 .get_l2_block_header(self.resolved_block_number)
diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs
index 741bcaea18f4..d22d7de47d0f 100644
--- a/core/node/api_server/src/execution_sandbox/execute.rs
+++ b/core/node/api_server/src/execution_sandbox/execute.rs
@@ -1,88 +1,29 @@
 //! Implementation of "executing" methods, e.g. `eth_call`.
-use anyhow::Context as _;
-use tracing::{span, Level};
-use zksync_dal::{ConnectionPool, Core};
-use zksync_multivm::{
-    interface::{
-        TransactionExecutionMetrics, TxExecutionMode, VmExecutionResultAndLogs, VmInterface,
-    },
-    tracers::StorageInvocations,
-    MultiVMTracer,
-};
-use zksync_types::{
-    l2::L2Tx, transaction_request::CallOverrides, ExecuteTransactionCommon, Nonce,
-    PackedEthSignature, Transaction, U256,
+use async_trait::async_trait;
+use zksync_dal::{Connection, Core};
+use zksync_multivm::interface::{
+    executor::{OneshotExecutor, TransactionValidator},
+    storage::ReadStorage,
+    tracer::{ValidationError, ValidationParams},
+    Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult,
+    TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs,
 };
+use zksync_types::{api::state_override::StateOverride, l2::L2Tx};
+use zksync_vm_executor::oneshot::{MainOneshotExecutor, MockOneshotExecutor};
 
 use super::{
-    apply, testonly::MockTransactionExecutor, vm_metrics, ApiTracer, BlockArgs, TxSharedArgs,
-    VmPermit,
+    apply, storage::StorageWithOverrides, vm_metrics, BlockArgs, TxSetupArgs, VmPermit,
+    SANDBOX_METRICS,
 };
-use crate::execution_sandbox::api::state_override::StateOverride;
-
-#[derive(Debug)]
-pub(crate) struct TxExecutionArgs {
-    pub execution_mode: TxExecutionMode,
-    pub enforced_nonce: Option<Nonce>,
-    pub added_balance: U256,
-    pub enforced_base_fee: Option<u64>,
-    pub missed_storage_invocation_limit: usize,
-}
-
-impl TxExecutionArgs {
-    pub fn for_validation(tx: &L2Tx) -> Self {
-        Self {
-            execution_mode: TxExecutionMode::VerifyExecute,
-            enforced_nonce: Some(tx.nonce()),
-            added_balance: U256::zero(),
-            enforced_base_fee: Some(tx.common_data.fee.max_fee_per_gas.as_u64()),
-            missed_storage_invocation_limit: usize::MAX,
-        }
-    }
-
-    fn for_eth_call(
-        enforced_base_fee: Option<u64>,
-        vm_execution_cache_misses_limit: Option<usize>,
-    ) -> Self {
-        let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX);
-        Self {
-            execution_mode: TxExecutionMode::EthCall,
-            enforced_nonce: None,
-            added_balance: U256::zero(),
-            enforced_base_fee,
-            missed_storage_invocation_limit,
-        }
-    }
-
-    pub fn for_gas_estimate(
-        vm_execution_cache_misses_limit: Option<usize>,
-        tx: &Transaction,
-        base_fee: u64,
-    ) -> Self {
-        let missed_storage_invocation_limit = vm_execution_cache_misses_limit.unwrap_or(usize::MAX);
-        // For L2 transactions we need to explicitly put enough balance into the account of the users
-        // while for L1->L2 transactions the `to_mint` field plays this role
-        let added_balance = match &tx.common_data {
-            ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas,
-            ExecuteTransactionCommon::L1(_) => U256::zero(),
-            ExecuteTransactionCommon::ProtocolUpgrade(_) => U256::zero(),
-        };
-
-        Self {
-            execution_mode: TxExecutionMode::EstimateFee,
-            missed_storage_invocation_limit,
-            enforced_nonce: tx.nonce(),
-            added_balance,
-            enforced_base_fee: Some(base_fee),
-        }
-    }
-}
+use crate::execution_sandbox::vm_metrics::SandboxStage;
 
 #[derive(Debug, Clone)]
-pub(crate) struct TransactionExecutionOutput {
+pub struct TransactionExecutionOutput {
     /// Output of the VM.
     pub vm: VmExecutionResultAndLogs,
+    /// Traced calls if requested.
+    pub call_traces: Vec<Call>,
     /// Execution metrics.
     pub metrics: TransactionExecutionMetrics,
     /// Were published bytecodes OK?
@@ -91,114 +32,127 @@
 
 /// Executor of transactions.
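+/// Can be backed by the real [`MainOneshotExecutor`] or, in tests, by a [`MockOneshotExecutor`].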
 #[derive(Debug)]
-pub(crate) enum TransactionExecutor {
-    Real,
+pub enum TransactionExecutor {
+    Real(MainOneshotExecutor),
     #[doc(hidden)] // Intended for tests only
-    Mock(MockTransactionExecutor),
+    Mock(MockOneshotExecutor),
 }

 impl TransactionExecutor {
+    pub fn real(missed_storage_invocation_limit: usize) -> Self {
+        let mut executor = MainOneshotExecutor::new(missed_storage_invocation_limit);
+        executor
+            .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]);
+        Self::Real(executor)
+    }
+
     /// This method assumes that (block with number `resolved_block_number` is present in DB)
     /// or (`block_id` is `pending` and block with number `resolved_block_number - 1` is present in DB)
     #[allow(clippy::too_many_arguments)]
-    #[tracing::instrument(skip_all)]
+    #[tracing::instrument(level = "debug", skip_all)]
     pub async fn execute_tx_in_sandbox(
         &self,
         vm_permit: VmPermit,
-        shared_args: TxSharedArgs,
-        // If `true`, then the batch's L1/pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <=
-        // to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the
-        // current L1 prices for gas or pubdata.
-        adjust_pubdata_price: bool,
+        setup_args: TxSetupArgs,
         execution_args: TxExecutionArgs,
-        connection_pool: ConnectionPool<Core>,
-        tx: Transaction,
+        connection: Connection<'static, Core>,
         block_args: BlockArgs,
         state_override: Option<StateOverride>,
-        custom_tracers: Vec<ApiTracer>,
+        tracing_params: OneshotTracingParams,
     ) -> anyhow::Result<TransactionExecutionOutput> {
-        if let Self::Mock(mock_executor) = self {
-            return mock_executor.execute_tx(&tx, &block_args);
-        }
-
-        let total_factory_deps = tx.execute.factory_deps.len() as u16;
-
-        let (published_bytecodes, execution_result) = tokio::task::spawn_blocking(move || {
-            let span = span!(Level::DEBUG, "execute_in_sandbox").entered();
-            let result = apply::apply_vm_in_sandbox(
-                vm_permit,
-                shared_args,
-                adjust_pubdata_price,
-                &execution_args,
-                &connection_pool,
-                tx,
-                block_args,
-                state_override,
-                |vm, tx, _| {
-                    let storage_invocation_tracer =
-                        StorageInvocations::new(execution_args.missed_storage_invocation_limit);
-                    let custom_tracers: Vec<_> = custom_tracers
-                        .into_iter()
-                        .map(|tracer| tracer.into_boxed())
-                        .chain(vec![storage_invocation_tracer.into_tracer_pointer()])
-                        .collect();
-                    vm.inspect_transaction_with_bytecode_compression(
-                        custom_tracers.into(),
-                        tx,
-                        true,
-                    )
-                },
-            );
-            span.exit();
-            result
-        })
-        .await
-        .context("transaction execution panicked")??;
+        let total_factory_deps = execution_args.transaction.execute.factory_deps.len() as u16;
+        let (env, storage) =
+            apply::prepare_env_and_storage(connection, setup_args, &block_args).await?;
+        let state_override = state_override.unwrap_or_default();
+        let storage = StorageWithOverrides::new(storage, &state_override);
+
+        let result = self
+            .inspect_transaction_with_bytecode_compression(
+                storage,
+                env,
+                execution_args,
+                tracing_params,
+            )
+            .await?;
+        drop(vm_permit);

         let metrics =
-            vm_metrics::collect_tx_execution_metrics(total_factory_deps, &execution_result);
+            vm_metrics::collect_tx_execution_metrics(total_factory_deps, &result.tx_result);

         Ok(TransactionExecutionOutput {
-            vm: execution_result,
+            vm: *result.tx_result,
+            call_traces: result.call_traces,
             metrics,
-            are_published_bytecodes_ok: published_bytecodes.is_ok(),
+            are_published_bytecodes_ok: result.compression_result.is_ok(),
         })
     }
+}

-    #[allow(clippy::too_many_arguments)]
-    pub async fn execute_tx_eth_call(
-        &self,
-        vm_permit: VmPermit,
-        shared_args: TxSharedArgs,
-        connection_pool: ConnectionPool<Core>,
-        call_overrides: CallOverrides,
-        mut tx: L2Tx,
-        block_args: BlockArgs,
-        vm_execution_cache_misses_limit: Option<usize>,
-        custom_tracers: Vec<ApiTracer>,
-        state_override: Option<StateOverride>,
-    ) -> anyhow::Result<VmExecutionResultAndLogs> {
-        let execution_args = TxExecutionArgs::for_eth_call(
-            call_overrides.enforced_base_fee,
-            vm_execution_cache_misses_limit,
-        );
+impl From<MockOneshotExecutor> for TransactionExecutor {
+    fn from(executor: MockOneshotExecutor) -> Self {
+        Self::Mock(executor)
+    }
+}

-        if tx.common_data.signature.is_empty() {
-            tx.common_data.signature = PackedEthSignature::default().serialize_packed().into();
+#[async_trait]
+impl<S> OneshotExecutor<S> for TransactionExecutor
+where
+    S: ReadStorage + Send + 'static,
+{
+    async fn inspect_transaction_with_bytecode_compression(
+        &self,
+        storage: S,
+        env: OneshotEnv,
+        args: TxExecutionArgs,
+        tracing_params: OneshotTracingParams,
+    ) -> anyhow::Result<OneshotTransactionExecutionResult> {
+        match self {
+            Self::Real(executor) => {
+                executor
+                    .inspect_transaction_with_bytecode_compression(
+                        storage,
+                        env,
+                        args,
+                        tracing_params,
+                    )
+                    .await
+            }
+            Self::Mock(executor) => {
+                executor
+                    .inspect_transaction_with_bytecode_compression(
+                        storage,
+                        env,
+                        args,
+                        tracing_params,
+                    )
+                    .await
+            }
         }
+    }
+}

-        let output = self
-            .execute_tx_in_sandbox(
-                vm_permit,
-                shared_args,
-                false,
-                execution_args,
-                connection_pool,
-                tx.into(),
-                block_args,
-                state_override,
-                custom_tracers,
-            )
-            .await?;
-        Ok(output.vm)
+#[async_trait]
+impl<S> TransactionValidator<S> for TransactionExecutor
+where
+    S: ReadStorage + Send + 'static,
+{
+    async fn validate_transaction(
+        &self,
+        storage: S,
+        env: OneshotEnv,
+        tx: L2Tx,
+        validation_params: ValidationParams,
+    ) -> anyhow::Result<Result<(), ValidationError>> {
+        match self {
+            Self::Real(executor) => {
+                executor
+                    .validate_transaction(storage, env, tx, validation_params)
+                    .await
+            }
+            Self::Mock(executor) => {
+                executor
+                    .validate_transaction(storage, env, tx, validation_params)
+                    .await
+            }
+        }
     }
 }
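For orientation, the refactored `TransactionExecutor` above simply delegates both trait methods to the real or mock executor. A minimal sketch of driving any executor generically, assuming only the trait bounds visible in this diff; the helper name `run_oneshot` is hypothetical and not part of the PR:

async fn run_oneshot<S, E>(
    executor: &E,
    storage: S,
    env: OneshotEnv,
    args: TxExecutionArgs,
) -> anyhow::Result<VmExecutionResultAndLogs>
where
    S: ReadStorage + Send + 'static,
    E: OneshotExecutor<S>,
{
    // Run without call tracing; tracing can be requested via
    // `OneshotTracingParams { trace_calls: true }` as done for `debug_traceCall`.
    let output = executor
        .inspect_transaction_with_bytecode_compression(
            storage,
            env,
            args,
            OneshotTracingParams::default(),
        )
        .await?;
    // Surface bytecode compression failures instead of silently ignoring them.
    anyhow::ensure!(output.compression_result.is_ok(), "bytecode compression failed");
    Ok(*output.tx_result)
}
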
diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs
index f7c876679cb0..79c6123642cc 100644
--- a/core/node/api_server/src/execution_sandbox/mod.rs
+++ b/core/node/api_server/src/execution_sandbox/mod.rs
@@ -5,18 +5,17 @@ use std::{

 use anyhow::Context as _;
 use rand::{thread_rng, Rng};
-use tokio::runtime::Handle;
 use zksync_dal::{pruning_dal::PruningInfo, Connection, Core, CoreDal, DalError};
+use zksync_multivm::interface::TxExecutionMode;
 use zksync_state::PostgresStorageCaches;
 use zksync_types::{
     api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId,
 };

+pub use self::execute::TransactionExecutor; // FIXME (PLA-1018): remove
 use self::vm_metrics::SandboxStage;
 pub(super) use self::{
     error::SandboxExecutionError,
-    execute::{TransactionExecutor, TxExecutionArgs},
-    tracers::ApiTracer,
     validate::ValidationError,
     vm_metrics::{SubmitTxStage, SANDBOX_METRICS},
 };
@@ -27,10 +26,8 @@
 mod apply;
 mod error;
 mod execute;
 mod storage;
-pub mod testonly;
 #[cfg(test)]
 mod tests;
-mod tracers;
 mod validate;
 mod vm_metrics;

@@ -40,17 +37,9 @@
 /// as a proof that the caller obtained a token from `VmConcurrencyLimiter`,
 #[derive(Debug, Clone)]
 pub struct VmPermit {
-    /// A handle to the runtime that is used to query the VM storage.
-    rt_handle: Handle,
     _permit: Arc<OwnedSemaphorePermit>,
 }

-impl VmPermit {
-    fn rt_handle(&self) -> &Handle {
-        &self.rt_handle
-    }
-}
-
 /// Barrier-like synchronization primitive allowing to close a [`VmConcurrencyLimiter`] it's attached to
 /// so that it doesn't issue new permits, and to wait for all permits to drop.
 #[derive(Debug, Clone)]
@@ -103,7 +92,6 @@ impl VmConcurrencyBarrier {
 pub struct VmConcurrencyLimiter {
     /// Semaphore that limits the number of concurrent VM executions.
     limiter: Arc<Semaphore>,
-    rt_handle: Handle,
 }

 impl VmConcurrencyLimiter {
@@ -116,7 +104,6 @@ impl VmConcurrencyLimiter {
         let this = Self {
             limiter: Arc::clone(&limiter),
-            rt_handle: Handle::current(),
         };
         let barrier = VmConcurrencyBarrier {
             limiter,
@@ -144,7 +131,6 @@ impl VmConcurrencyLimiter {
         }

         Some(VmPermit {
-            rt_handle: self.rt_handle.clone(),
             _permit: Arc::new(permit),
         })
     }
@@ -163,9 +149,10 @@ async fn get_pending_state(
     Ok((block_id, resolved_block_number))
 }

-/// Arguments for VM execution not specific to a particular transaction.
+/// Arguments for VM execution necessary to set up storage and environment.
 #[derive(Debug, Clone)]
-pub(crate) struct TxSharedArgs {
+pub struct TxSetupArgs {
+    pub execution_mode: TxExecutionMode,
     pub operator_account: AccountTreeId,
     pub fee_input: BatchFeeInput,
     pub base_system_contracts: MultiVMBaseSystemContracts,
@@ -173,19 +160,25 @@
     pub validation_computational_gas_limit: u32,
     pub chain_id: L2ChainId,
     pub whitelisted_tokens_for_aa: Vec<Address>,
+    pub enforced_base_fee: Option<u64>,
 }

-impl TxSharedArgs {
+impl TxSetupArgs {
     #[cfg(test)]
-    pub fn mock(base_system_contracts: MultiVMBaseSystemContracts) -> Self {
+    pub fn mock(
+        execution_mode: TxExecutionMode,
+        base_system_contracts: MultiVMBaseSystemContracts,
+    ) -> Self {
         Self {
+            execution_mode,
             operator_account: AccountTreeId::default(),
             fee_input: BatchFeeInput::l1_pegged(55, 555),
             base_system_contracts,
             caches: PostgresStorageCaches::new(1, 1),
             validation_computational_gas_limit: u32::MAX,
             chain_id: L2ChainId::default(),
-            whitelisted_tokens_for_aa: Vec::new(),
+            whitelisted_tokens_for_aa: vec![],
+            enforced_base_fee: None,
         }
     }
 }
@@ -215,7 +208,7 @@ impl BlockStartInfoInner {

 /// Information about first L1 batch / L2 block in the node storage.
 #[derive(Debug, Clone)]
-pub(crate) struct BlockStartInfo {
+pub struct BlockStartInfo {
     cached_pruning_info: Arc<RwLock<BlockStartInfoInner>>,
     max_cache_age: Duration,
 }
@@ -331,7 +324,7 @@ impl BlockStartInfo {
 }

 #[derive(Debug, thiserror::Error)]
-pub(crate) enum BlockArgsError {
+pub enum BlockArgsError {
     #[error("Block is pruned; first retained block is {0}")]
     Pruned(L2BlockNumber),
     #[error("Block is missing, but can appear in the future")]
@@ -342,7 +335,7 @@

 /// Information about a block provided to VM.
 #[derive(Debug, Clone, Copy)]
-pub(crate) struct BlockArgs {
+pub struct BlockArgs {
     block_id: api::BlockId,
     resolved_block_number: L2BlockNumber,
     l1_batch_timestamp_s: Option<u64>,
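For reference, `TxSetupArgs` now carries everything needed to build the one-shot VM environment. A sketch of filling it in for an `eth_call`-style request, using only the fields shown above; the concrete values are illustrative and mirror the `mock()` constructor:

let setup_args = TxSetupArgs {
    execution_mode: TxExecutionMode::EthCall,
    operator_account: AccountTreeId::default(),
    fee_input: BatchFeeInput::l1_pegged(55, 555),
    base_system_contracts, // e.g. `ApiContracts::load_from_disk().await?.eth_call`
    caches: PostgresStorageCaches::new(1, 1),
    validation_computational_gas_limit: u32::MAX,
    chain_id: L2ChainId::default(),
    whitelisted_tokens_for_aa: vec![],
    enforced_base_fee: None, // `None` keeps the base fee of the resolved block
};
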
diff --git a/core/node/api_server/src/execution_sandbox/testonly.rs b/core/node/api_server/src/execution_sandbox/testonly.rs
deleted file mode 100644
index 59fa2e38db7a..000000000000
--- a/core/node/api_server/src/execution_sandbox/testonly.rs
+++ /dev/null
@@ -1,134 +0,0 @@
-use std::fmt;
-
-use zksync_multivm::interface::{
-    ExecutionResult, TransactionExecutionMetrics, VmExecutionResultAndLogs,
-};
-use zksync_types::{l2::L2Tx, ExecuteTransactionCommon, Transaction};
-
-use super::{
-    execute::{TransactionExecutionOutput, TransactionExecutor},
-    validate::ValidationError,
-    BlockArgs,
-};
-
-type TxResponseFn = dyn Fn(&Transaction, &BlockArgs) -> VmExecutionResultAndLogs + Send + Sync;
-
-pub struct MockTransactionExecutor {
-    call_responses: Box<TxResponseFn>,
-    tx_responses: Box<TxResponseFn>,
-}
-
-impl fmt::Debug for MockTransactionExecutor {
-    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
-        formatter
-            .debug_struct("MockTransactionExecutor")
-            .finish_non_exhaustive()
-    }
-}
-
-impl Default for MockTransactionExecutor {
-    fn default() -> Self {
-        Self {
-            call_responses: Box::new(|tx, _| {
-                panic!(
-                    "Unexpected call with data {}",
-                    hex::encode(tx.execute.calldata())
-                );
-            }),
-            tx_responses: Box::new(|tx, _| {
-                panic!("Unexpect transaction call: {tx:?}");
-            }),
-        }
-    }
-}
-
-impl MockTransactionExecutor {
-    #[cfg(test)]
-    pub(crate) fn set_call_responses<F>(&mut self, responses: F)
-    where
-        F: Fn(&Transaction, &BlockArgs) -> ExecutionResult + 'static + Send + Sync,
-    {
-        self.call_responses = self.wrap_responses(responses);
-    }
-
-    #[cfg(test)]
-    pub(crate) fn set_tx_responses<F>(&mut self, responses: F)
-    where
-        F: Fn(&Transaction, &BlockArgs) -> ExecutionResult + 'static + Send + Sync,
-    {
-        self.tx_responses = self.wrap_responses(responses);
-    }
-
-    #[cfg(test)]
-    fn wrap_responses<F>(&mut self, responses: F) -> Box<TxResponseFn>
-    where
-        F: Fn(&Transaction, &BlockArgs) -> ExecutionResult + 'static + Send + Sync,
-    {
-        Box::new(
-            move |tx: &Transaction, ba: &BlockArgs| -> VmExecutionResultAndLogs {
-                VmExecutionResultAndLogs {
-                    result: responses(tx, ba),
-                    logs: Default::default(),
-                    statistics: Default::default(),
-                    refunds: Default::default(),
-                }
-            },
-        )
-    }
-
-    #[cfg(test)]
-    pub(crate) fn set_tx_responses_with_logs<F>(&mut self, responses: F)
-    where
-        F: Fn(&Transaction, &BlockArgs) -> VmExecutionResultAndLogs + 'static + Send + Sync,
-    {
-        self.tx_responses = Box::new(responses);
-    }
-
-    pub(crate) fn validate_tx(
-        &self,
-        tx: L2Tx,
-        block_args: &BlockArgs,
-    ) -> Result<(), ValidationError> {
-        let result = (self.tx_responses)(&tx.into(), block_args);
-        match result.result {
-            ExecutionResult::Success { .. } => Ok(()),
-            other => Err(ValidationError::Internal(anyhow::anyhow!(
-                "transaction validation failed: {other:?}"
-            ))),
-        }
-    }
-
-    pub(crate) fn execute_tx(
-        &self,
-        tx: &Transaction,
-        block_args: &BlockArgs,
-    ) -> anyhow::Result<TransactionExecutionOutput> {
-        let result = self.get_execution_result(tx, block_args);
-        let output = TransactionExecutionOutput {
-            vm: result,
-            metrics: TransactionExecutionMetrics::default(),
-            are_published_bytecodes_ok: true,
-        };
-
-        Ok(output)
-    }
-
-    fn get_execution_result(
-        &self,
-        tx: &Transaction,
-        block_args: &BlockArgs,
-    ) -> VmExecutionResultAndLogs {
-        if let ExecuteTransactionCommon::L2(data) = &tx.common_data {
-            if data.input.is_none() {
-                return (self.call_responses)(tx, block_args);
-            }
-        }
-        (self.tx_responses)(tx, block_args)
-    }
-}
-
-impl From<MockTransactionExecutor> for TransactionExecutor {
-    fn from(executor: MockTransactionExecutor) -> Self {
-        Self::Mock(executor)
-    }
-}
diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs
index 0a8af35597b3..35103779a49e 100644
--- a/core/node/api_server/src/execution_sandbox/tests.rs
+++ b/core/node/api_server/src/execution_sandbox/tests.rs
@@ -1,12 +1,31 @@
 //! Tests for the VM execution sandbox.

+use std::collections::HashMap;
+
 use assert_matches::assert_matches;
+use test_casing::test_casing;
 use zksync_dal::ConnectionPool;
+use zksync_multivm::{
+    interface::{
+        executor::{OneshotExecutor, TransactionValidator},
+        tracer::ValidationError,
+        Halt, OneshotTracingParams, TxExecutionArgs,
+    },
+    utils::derive_base_fee_and_gas_per_pubdata,
+};
 use zksync_node_genesis::{insert_genesis_batch, GenesisParams};
-use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot};
+use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot};
+use zksync_types::{
+    api::state_override::{OverrideAccount, StateOverride},
+    fee::Fee,
+    l2::L2Tx,
+    transaction_request::PaymasterParams,
+    K256PrivateKey, Nonce, ProtocolVersionId, Transaction, U256,
+};
+use zksync_vm_executor::oneshot::MainOneshotExecutor;

-use super::*;
-use crate::{execution_sandbox::apply::apply_vm_in_sandbox, tx_sender::ApiContracts};
+use super::{storage::StorageWithOverrides, *};
+use crate::tx_sender::ApiContracts;

 #[tokio::test]
 async fn creating_block_args() {
@@ -163,45 +182,125 @@ async fn creating_block_args_after_snapshot_recovery() {
 }

 #[tokio::test]
-async fn instantiating_vm() {
+async fn estimating_gas() {
     let pool = ConnectionPool::<Core>::test_pool().await;
-    let mut storage = pool.connection().await.unwrap();
-    insert_genesis_batch(&mut storage, &GenesisParams::mock())
+    let mut connection = pool.connection().await.unwrap();
+    insert_genesis_batch(&mut connection, &GenesisParams::mock())
         .await
         .unwrap();

-    let block_args = BlockArgs::pending(&mut storage).await.unwrap();
-    test_instantiating_vm(pool.clone(), block_args).await;
-    let start_info = BlockStartInfo::new(&mut storage, Duration::MAX)
+    let block_args = BlockArgs::pending(&mut connection).await.unwrap();
+    test_instantiating_vm(connection, block_args).await;
+
+    let mut connection = pool.connection().await.unwrap();
+    let start_info = BlockStartInfo::new(&mut connection, Duration::MAX)
         .await
         .unwrap();
-    let block_args = BlockArgs::new(&mut storage, api::BlockId::Number(0.into()), &start_info)
+    let block_args = BlockArgs::new(&mut connection, api::BlockId::Number(0.into()), &start_info)
         .await
         .unwrap();
-    test_instantiating_vm(pool.clone(), block_args).await;
+    test_instantiating_vm(connection, block_args).await;
 }

-async fn test_instantiating_vm(pool: ConnectionPool<Core>, block_args: BlockArgs) {
-    let (vm_concurrency_limiter, _) = VmConcurrencyLimiter::new(1);
-    let vm_permit = vm_concurrency_limiter.acquire().await.unwrap();
-    let transaction = create_l2_transaction(10, 100).into();
+async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args: BlockArgs) {
     let estimate_gas_contracts = ApiContracts::load_from_disk().await.unwrap().estimate_gas;
-    tokio::task::spawn_blocking(move || {
-        apply_vm_in_sandbox(
-            vm_permit,
-            TxSharedArgs::mock(estimate_gas_contracts),
-            true,
-            &TxExecutionArgs::for_gas_estimate(None, &transaction, 123),
-            &pool,
-            transaction.clone(),
-            block_args,
-            None,
-            |_, received_tx, _| {
-                assert_eq!(received_tx, transaction);
-            },
-        )
-    })
-    .await
-    .expect("VM instantiation panicked")
-    .expect("VM instantiation errored");
+    let mut setup_args = TxSetupArgs::mock(TxExecutionMode::EstimateFee, estimate_gas_contracts);
+    let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(
+        setup_args.fee_input,
+        ProtocolVersionId::latest().into(),
+    );
+    setup_args.enforced_base_fee = Some(base_fee);
+    let transaction = Transaction::from(create_transfer(base_fee, gas_per_pubdata));
+
+    let execution_args = TxExecutionArgs::for_gas_estimate(transaction.clone());
+    let (env, storage) = apply::prepare_env_and_storage(connection, setup_args, &block_args)
+        .await
+        .unwrap();
+    let storage = StorageWithOverrides::new(storage, &StateOverride::default());
+
+    let tracing_params = OneshotTracingParams::default();
+    let output = MainOneshotExecutor::new(usize::MAX)
+        .inspect_transaction_with_bytecode_compression(storage, env, execution_args, tracing_params)
+        .await
+        .unwrap();
+    output.compression_result.unwrap();
+    let tx_result = *output.tx_result;
+    assert!(!tx_result.result.is_failed(), "{tx_result:#?}");
+}
+
+fn create_transfer(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx {
+    let fee = Fee {
+        gas_limit: 200_000.into(),
+        max_fee_per_gas: fee_per_gas.into(),
+        max_priority_fee_per_gas: 0_u64.into(),
+        gas_per_pubdata_limit: gas_per_pubdata.into(),
+    };
+    L2Tx::new_signed(
+        Address::random(),
+        vec![],
+        Nonce(0),
+        fee,
+        U256::zero(),
+        L2ChainId::default(),
+        &K256PrivateKey::random(),
+        vec![],
+        PaymasterParams::default(),
+    )
+    .unwrap()
+}
+
+#[test_casing(2, [false, true])]
+#[tokio::test]
+async fn validating_transaction(set_balance: bool) {
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut connection = pool.connection().await.unwrap();
+    insert_genesis_batch(&mut connection, &GenesisParams::mock())
+        .await
+        .unwrap();
+
+    let block_args = BlockArgs::pending(&mut connection).await.unwrap();
+
+    let call_contracts = ApiContracts::load_from_disk().await.unwrap().eth_call;
+    let mut setup_args = TxSetupArgs::mock(TxExecutionMode::VerifyExecute, call_contracts);
+    let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(
+        setup_args.fee_input,
+        ProtocolVersionId::latest().into(),
+    );
+    setup_args.enforced_base_fee = Some(base_fee);
+    let transaction = create_transfer(base_fee, gas_per_pubdata);
+
+    let validation_params =
+        validate::get_validation_params(&mut connection, &transaction, u32::MAX, &[])
+            .await
+            .unwrap();
+    let (env, storage) = apply::prepare_env_and_storage(connection, setup_args, &block_args)
+        .await
+        .unwrap();
+    let state_override = if set_balance {
+        let account_override = OverrideAccount {
+            balance: Some(U256::from(1) << 128),
+            ..OverrideAccount::default()
+        };
+        StateOverride::new(HashMap::from([(
+            transaction.initiator_account(),
+            account_override,
+        )]))
+    } else {
+        StateOverride::default()
+    };
+    let storage = StorageWithOverrides::new(storage, &state_override);
+
+    let validation_result = MainOneshotExecutor::new(usize::MAX)
+        .validate_transaction(storage, env, transaction, validation_params)
+        .await
+        .unwrap();
+    if set_balance {
+        validation_result.expect("validation failed");
+    } else {
+        assert_matches!(
+            validation_result.unwrap_err(),
+            ValidationError::FailedTx(Halt::ValidationFailed(reason))
+                if reason.to_string().contains("Not enough balance")
+        );
+    }
+}
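The `set_balance` branch above relies on `StateOverride` to fund the sender without touching Postgres. The same pattern can be factored into a helper; a sketch under the types shown in this test (the `fund` helper itself is hypothetical):

fn fund(address: Address, balance: U256) -> StateOverride {
    // Override only the balance; all other account fields stay as in storage.
    let account = OverrideAccount {
        balance: Some(balance),
        ..OverrideAccount::default()
    };
    StateOverride::new(HashMap::from([(address, account)]))
}

// Usage, mirroring the test above:
// let storage = StorageWithOverrides::new(storage, &fund(tx.initiator_account(), U256::from(1) << 128));
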
diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs
deleted file mode 100644
index 8d61d896a362..000000000000
--- a/core/node/api_server/src/execution_sandbox/tracers.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-use std::sync::Arc;
-
-use once_cell::sync::OnceCell;
-use zksync_multivm::{
-    interface::{storage::WriteStorage, Call},
-    tracers::CallTracer,
-    vm_latest::HistoryMode,
-    MultiVMTracer, MultiVmTracerPointer,
-};
-
-/// Custom tracers supported by our API
-#[derive(Debug)]
-pub(crate) enum ApiTracer {
-    CallTracer(Arc<OnceCell<Vec<Call>>>),
-}
-
-impl ApiTracer {
-    pub fn into_boxed<
-        S: WriteStorage,
-        H: HistoryMode + zksync_multivm::HistoryMode + 'static,
-    >(
-        self,
-    ) -> MultiVmTracerPointer<S, H> {
-        match self {
-            ApiTracer::CallTracer(tracer) => CallTracer::new(tracer.clone()).into_tracer_pointer(),
-        }
-    }
-}
diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs
index a856386b4562..e9087e608eeb 100644
--- a/core/node/api_server/src/execution_sandbox/validate.rs
+++ b/core/node/api_server/src/execution_sandbox/validate.rs
@@ -1,23 +1,23 @@
 use std::collections::HashSet;

 use anyhow::Context as _;
-use zksync_dal::{Connection, ConnectionPool, Core, CoreDal};
-use zksync_multivm::{
-    interface::{ExecutionResult, VmExecutionMode, VmInterface},
-    tracers::{
-        StorageInvocations, ValidationError as RawValidationError, ValidationTracer,
-        ValidationTracerParams,
-    },
-    vm_latest::HistoryDisabled,
-    MultiVMTracer,
+use tracing::Instrument;
+use zksync_dal::{Connection, Core, CoreDal};
+use zksync_multivm::interface::{
+    executor::TransactionValidator,
+    tracer::{ValidationError as RawValidationError, ValidationParams},
+};
+use zksync_types::{
+    api::state_override::StateOverride, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS,
+    TRUSTED_TOKEN_SLOTS,
 };
-use zksync_types::{l2::L2Tx, Address, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS};

 use super::{
     apply,
     execute::TransactionExecutor,
+    storage::StorageWithOverrides,
     vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS},
-    BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit,
+    BlockArgs, TxSetupArgs, VmPermit,
 };

 /// Validation error used by the sandbox. Besides validation errors returned by VM, it also includes an internal error
@@ -31,88 +31,39 @@ pub(crate) enum ValidationError {
 }

 impl TransactionExecutor {
+    #[tracing::instrument(level = "debug", skip_all)]
     pub(crate) async fn validate_tx_in_sandbox(
         &self,
-        connection_pool: ConnectionPool<Core>,
+        mut connection: Connection<'static, Core>,
         vm_permit: VmPermit,
         tx: L2Tx,
-        shared_args: TxSharedArgs,
+        setup_args: TxSetupArgs,
         block_args: BlockArgs,
         computational_gas_limit: u32,
     ) -> Result<(), ValidationError> {
-        if let Self::Mock(mock) = self {
-            return mock.validate_tx(tx, &block_args);
-        }
-
-        let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start();
-        let mut connection = connection_pool
-            .connection_tagged("api")
-            .await
-            .context("failed acquiring DB connection")?;
+        let total_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start();
         let validation_params = get_validation_params(
             &mut connection,
             &tx,
             computational_gas_limit,
-            &shared_args.whitelisted_tokens_for_aa,
+            &setup_args.whitelisted_tokens_for_aa,
         )
         .await
         .context("failed getting validation params")?;
-        drop(connection);
-
-        let execution_args = TxExecutionArgs::for_validation(&tx);
-        let tx: Transaction = tx.into();
-
-        let validation_result = tokio::task::spawn_blocking(move || {
-            let span = tracing::debug_span!("validate_in_sandbox").entered();
-            let result = apply::apply_vm_in_sandbox(
-                vm_permit,
-                shared_args,
-                true,
-                &execution_args,
-                &connection_pool,
-                tx,
-                block_args,
-                None,
-                |vm, tx, protocol_version| {
-                    let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start();
-                    let span = tracing::debug_span!("validation").entered();
-                    vm.push_transaction(tx);
-
-                    let (tracer, validation_result) = ValidationTracer::<HistoryDisabled>::new(
-                        validation_params,
-                        protocol_version.into(),
-                    );
-                    let result = vm.inspect(
-                        vec![
-                            tracer.into_tracer_pointer(),
-                            StorageInvocations::new(execution_args.missed_storage_invocation_limit)
-                                .into_tracer_pointer(),
-                        ]
-                        .into(),
-                        VmExecutionMode::OneTx,
-                    );
-
-                    let result = match (result.result, validation_result.get()) {
-                        (_, Some(err)) => Err(RawValidationError::ViolatedRule(err.clone())),
-                        (ExecutionResult::Halt { reason }, _) => {
-                            Err(RawValidationError::FailedTx(reason))
-                        }
-                        (_, None) => Ok(()),
-                    };
-
-                    stage_latency.observe();
-                    span.exit();
-                    result
-                },
-            );
-            span.exit();
-            result
-        })
-        .await
-        .context("transaction validation panicked")??;
+        let (env, storage) =
+            apply::prepare_env_and_storage(connection, setup_args, &block_args).await?;
+        let storage = StorageWithOverrides::new(storage, &StateOverride::default());

+        let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start();
+        let validation_result = self
+            .validate_transaction(storage, env, tx, validation_params)
+            .instrument(tracing::debug_span!("validation"))
+            .await?;
+        drop(vm_permit);
         stage_latency.observe();
+
+        total_latency.observe();
         validation_result.map_err(ValidationError::Vm)
     }
 }
@@ -120,12 +71,12 @@ impl TransactionExecutor {
 /// Some slots can be marked as "trusted". That is needed for slots which can not be
 /// trusted to change between validation and execution in general case, but
 /// sometimes we can safely rely on them to not change often.
-async fn get_validation_params(
+pub(super) async fn get_validation_params(
     connection: &mut Connection<'_, Core>,
     tx: &L2Tx,
     computational_gas_limit: u32,
     whitelisted_tokens_for_aa: &[Address],
-) -> anyhow::Result<ValidationTracerParams> {
+) -> anyhow::Result<ValidationParams> {
     let method_latency = EXECUTION_METRICS.get_validation_params.start();
     let user_address = tx.common_data.initiator_address;
     let paymaster_address = tx.common_data.paymaster_params.paymaster;
@@ -164,7 +115,7 @@ async fn get_validation_params(
     span.exit();
     method_latency.observe();

-    Ok(ValidationTracerParams {
+    Ok(ValidationParams {
         user_address,
         paymaster_address,
         trusted_slots,
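Callers of `validate_tx_in_sandbox` can still separate VM-level validation failures from infrastructure errors; a sketch of that match, assuming the `Vm` and `Internal` variants that this enum uses both before and after the refactoring:

match validation_result {
    Ok(()) => { /* transaction passed account validation */ }
    Err(ValidationError::Vm(reason)) => {
        // Raw validation error reported by the VM (e.g. a violated rule or halt).
        tracing::warn!("transaction failed validation: {reason}");
    }
    Err(ValidationError::Internal(err)) => {
        // Infrastructure failure (DB access, env setup); propagate as-is.
        return Err(err);
    }
}
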
diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs
index ffe87be899ba..cbfe7e90bd0f 100644
--- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs
+++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs
@@ -4,77 +4,14 @@ use vise::{
     Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, Metrics,
 };
 use zksync_multivm::{
-    interface::{
-        storage::StorageViewMetrics, TransactionExecutionMetrics, VmEvent,
-        VmExecutionResultAndLogs, VmMemoryMetrics,
-    },
+    interface::{TransactionExecutionMetrics, VmEvent, VmExecutionResultAndLogs},
     utils::StorageWritesDeduplicator,
 };
-use zksync_shared_metrics::InteractionType;
 use zksync_types::H256;
 use zksync_utils::bytecode::bytecode_len_in_bytes;

 use crate::utils::ReportFilter;

-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
-#[metrics(label = "type", rename_all = "snake_case")]
-enum SizeType {
-    Inner,
-    History,
-}
-
-const MEMORY_SIZE_BUCKETS: Buckets = Buckets::values(&[
-    1_000.0,
-    10_000.0,
-    100_000.0,
-    500_000.0,
-    1_000_000.0,
-    5_000_000.0,
-    10_000_000.0,
-    50_000_000.0,
-    100_000_000.0,
-    500_000_000.0,
-    1_000_000_000.0,
-]);
-
-#[derive(Debug, Metrics)]
-#[metrics(prefix = "runtime_context_memory")]
-struct RuntimeContextMemoryMetrics {
-    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
-    event_sink_size: Family<SizeType, Histogram<usize>>,
-    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
-    memory_size: Family<SizeType, Histogram<usize>>,
-    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
-    decommitter_size: Family<SizeType, Histogram<usize>>,
-    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
-    storage_size: Family<SizeType, Histogram<usize>>,
-    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
-    storage_view_cache_size: Histogram<usize>,
-    #[metrics(buckets = MEMORY_SIZE_BUCKETS)]
-    full: Histogram<usize>,
-}
-
-#[vise::register]
-static MEMORY_METRICS: vise::Global<RuntimeContextMemoryMetrics> = vise::Global::new();
-
-const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0);
-
-#[derive(Debug, Metrics)]
-#[metrics(prefix = "runtime_context_storage_interaction")]
-struct RuntimeContextStorageMetrics {
-    #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)]
-    amount: Family<InteractionType, Histogram<usize>>,
-    #[metrics(buckets = Buckets::LATENCIES)]
-    duration: Family<InteractionType, Histogram<Duration>>,
-    #[metrics(buckets = Buckets::LATENCIES)]
-    duration_per_unit: Family<InteractionType, Histogram<Duration>>,
-    #[metrics(buckets = Buckets::ZERO_TO_ONE)]
-    ratio: Histogram<f64>,
-}
-
-#[vise::register]
-static STORAGE_METRICS: vise::Global<RuntimeContextStorageMetrics> = vise::Global::new();
-
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
 #[metrics(label = "stage", rename_all = "snake_case")]
 pub(super) enum SandboxStage {
@@ -185,84 +122,6 @@ pub(super) struct ExecutionMetrics {
 #[vise::register]
 pub(super) static EXECUTION_METRICS: vise::Global<ExecutionMetrics> = vise::Global::new();

-pub(super) fn report_vm_memory_metrics(
-    tx_id: &str,
-    memory_metrics: &VmMemoryMetrics,
-    vm_execution_took: Duration,
-    storage_metrics: StorageViewMetrics,
-) {
-    MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner);
-    MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history);
-    MEMORY_METRICS.memory_size[&SizeType::Inner].observe(memory_metrics.memory_inner);
-    MEMORY_METRICS.memory_size[&SizeType::History].observe(memory_metrics.memory_history);
-    MEMORY_METRICS.decommitter_size[&SizeType::Inner]
-        .observe(memory_metrics.decommittment_processor_inner);
-    MEMORY_METRICS.decommitter_size[&SizeType::History]
-        .observe(memory_metrics.decommittment_processor_history);
-    MEMORY_METRICS.storage_size[&SizeType::Inner].observe(memory_metrics.storage_inner);
-    MEMORY_METRICS.storage_size[&SizeType::History].observe(memory_metrics.storage_history);
-
-    MEMORY_METRICS
-        .storage_view_cache_size
-        .observe(storage_metrics.cache_size);
-    MEMORY_METRICS
-        .full
-        .observe(memory_metrics.full_size() + storage_metrics.cache_size);
-
-    let total_storage_invocations = storage_metrics.get_value_storage_invocations
-        + storage_metrics.set_value_storage_invocations;
-    let total_time_spent_in_storage =
-        storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value;
-
-    STORAGE_METRICS.amount[&InteractionType::Missed]
-        .observe(storage_metrics.storage_invocations_missed);
-    STORAGE_METRICS.amount[&InteractionType::GetValue]
-        .observe(storage_metrics.get_value_storage_invocations);
-    STORAGE_METRICS.amount[&InteractionType::SetValue]
-        .observe(storage_metrics.set_value_storage_invocations);
-    STORAGE_METRICS.amount[&InteractionType::Total].observe(total_storage_invocations);
-
-    STORAGE_METRICS.duration[&InteractionType::Missed]
-        .observe(storage_metrics.time_spent_on_storage_missed);
-    STORAGE_METRICS.duration[&InteractionType::GetValue]
-        .observe(storage_metrics.time_spent_on_get_value);
-    STORAGE_METRICS.duration[&InteractionType::SetValue]
-        .observe(storage_metrics.time_spent_on_set_value);
-    STORAGE_METRICS.duration[&InteractionType::Total].observe(total_time_spent_in_storage);
-
-    if total_storage_invocations > 0 {
-        STORAGE_METRICS.duration_per_unit[&InteractionType::Total]
-            .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64));
-    }
-    if storage_metrics.storage_invocations_missed > 0 {
-        let duration_per_unit = storage_metrics
-            .time_spent_on_storage_missed
-            .div_f64(storage_metrics.storage_invocations_missed as f64);
-        STORAGE_METRICS.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit);
-    }
-
-    STORAGE_METRICS
-        .ratio
-        .observe(total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64());
-
-    const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000;
-
-    if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD {
-        tracing::info!(
-            "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \
-             {} get_value_storage_invocations, {} set_value_storage_invocations, \
-             vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \
-             (missed: {:?} get: {:?} set: {:?})",
-            storage_metrics.storage_invocations_missed,
-            storage_metrics.get_value_storage_invocations,
-            storage_metrics.set_value_storage_invocations,
-            storage_metrics.time_spent_on_storage_missed,
-            storage_metrics.time_spent_on_get_value,
-            storage_metrics.time_spent_on_set_value,
-        );
-    }
-}
-
 pub(super) fn collect_tx_execution_metrics(
     contracts_deployed: u16,
     result: &VmExecutionResultAndLogs,
diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs
index cec2e14ddb26..44eaae2e3eee 100644
--- a/core/node/api_server/src/tx_sender/mod.rs
+++ b/core/node/api_server/src/tx_sender/mod.rs
@@ -10,10 +10,13 @@ use zksync_dal::{
     transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal,
 };
 use zksync_multivm::{
-    interface::{TransactionExecutionMetrics, VmExecutionResultAndLogs},
+    interface::{
+        OneshotTracingParams, TransactionExecutionMetrics, TxExecutionArgs, TxExecutionMode,
+        VmExecutionResultAndLogs,
+    },
     utils::{
         adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead,
-        get_eth_call_gas_limit, get_max_batch_gas_limit,
+        get_max_batch_gas_limit,
     },
     vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
 };
@@ -41,8 +44,8 @@
 pub(super) use self::result::SubmitTxError;
 use self::{master_pool_sink::MasterPoolSink, tx_sink::TxSink};
 use crate::{
     execution_sandbox::{
-        BlockArgs, SubmitTxStage, TransactionExecutor, TxExecutionArgs, TxSharedArgs,
-        VmConcurrencyBarrier, VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS,
+        BlockArgs, SubmitTxStage, TransactionExecutor, TxSetupArgs, VmConcurrencyBarrier,
+        VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS,
     },
     tx_sender::result::ApiCallResult,
 };
@@ -140,6 +143,38 @@ impl MultiVMBaseSystemContracts {
             }
         }
     }
+
+    pub fn load_estimate_gas_blocking() -> Self {
+        Self {
+            pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(),
+            post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(),
+            post_virtual_blocks_finish_upgrade_fix:
+                BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(),
+            post_boojum: BaseSystemContracts::estimate_gas_post_boojum(),
+            post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(),
+            post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(),
+            post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(),
+            vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(),
+            vm_1_5_0_increased_memory:
+                BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(),
+        }
+    }
+
+    pub fn load_eth_call_blocking() -> Self {
+        Self {
+            pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(),
+            post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(),
+            post_virtual_blocks_finish_upgrade_fix:
+                BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(),
+            post_boojum: BaseSystemContracts::playground_post_boojum(),
+            post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(),
+            post_1_4_1: BaseSystemContracts::playground_post_1_4_1(),
+            post_1_4_2: BaseSystemContracts::playground_post_1_4_2(),
+            vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(),
+            vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory(
+            ),
+        }
+    }
 }

 /// Smart contracts to be used in the API sandbox requests, e.g. for estimating gas and
@@ -169,32 +204,8 @@ impl ApiContracts {
     /// Blocking version of [`Self::load_from_disk()`].
     pub fn load_from_disk_blocking() -> Self {
         Self {
-            estimate_gas: MultiVMBaseSystemContracts {
-                pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(),
-                post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(),
-                post_virtual_blocks_finish_upgrade_fix:
-                    BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(),
-                post_boojum: BaseSystemContracts::estimate_gas_post_boojum(),
-                post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(),
-                post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(),
-                post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(),
-                vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(),
-                vm_1_5_0_increased_memory:
-                    BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(),
-            },
-            eth_call: MultiVMBaseSystemContracts {
-                pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(),
-                post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(),
-                post_virtual_blocks_finish_upgrade_fix:
-                    BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(),
-                post_boojum: BaseSystemContracts::playground_post_boojum(),
-                post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(),
-                post_1_4_1: BaseSystemContracts::playground_post_1_4_1(),
-                post_1_4_2: BaseSystemContracts::playground_post_1_4_2(),
-                vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(),
-                vm_1_5_0_increased_memory:
-                    BaseSystemContracts::playground_post_1_5_0_increased_memory(),
-            },
+            estimate_gas: MultiVMBaseSystemContracts::load_estimate_gas_blocking(),
+            eth_call: MultiVMBaseSystemContracts::load_eth_call_blocking(),
         }
     }
 }
@@ -252,6 +263,10 @@ impl TxSenderBuilder {
         self.whitelisted_tokens_for_aa_cache.unwrap_or_else(|| {
             Arc::new(RwLock::new(self.config.whitelisted_tokens_for_aa.clone()))
         });
+        let missed_storage_invocation_limit = self
+            .config
+            .vm_execution_cache_misses_limit
+            .unwrap_or(usize::MAX);

         TxSender(Arc::new(TxSenderInner {
             sender_config: self.config,
@@ -263,7 +278,7 @@ impl TxSenderBuilder {
             storage_caches,
             whitelisted_tokens_for_aa_cache,
             sealer,
-            executor: TransactionExecutor::Real,
+            executor: TransactionExecutor::real(missed_storage_invocation_limit),
         }))
     }
 }
@@ -320,7 +335,7 @@ pub struct TxSenderInner {
     // Cache for white-listed tokens.
     pub(super) whitelisted_tokens_for_aa_cache: Arc<RwLock<Vec<Address>>>,
     /// Batch sealer used to check whether transaction can be executed by the sequencer.
-    sealer: Arc<dyn ConditionalSealer>,
+    pub(super) sealer: Arc<dyn ConditionalSealer>,
     pub(super) executor: TransactionExecutor,
 }

@@ -346,7 +361,7 @@ impl TxSender {
         self.0.whitelisted_tokens_for_aa_cache.read().await.clone()
     }

-    async fn acquire_replica_connection(&self) -> anyhow::Result<Connection<'_, Core>> {
+    async fn acquire_replica_connection(&self) -> anyhow::Result<Connection<'static, Core>> {
         self.0
             .replica_connection_pool
             .connection_tagged("api")
@@ -368,26 +383,23 @@ impl TxSender {
         stage_latency.observe();

         let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DryRun);
-        let shared_args = self.shared_args().await?;
+        let setup_args = self.call_args(&tx, None).await?;

         let vm_permit = self.0.vm_concurrency_limiter.acquire().await;
         let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?;
         let mut connection = self.acquire_replica_connection().await?;
         let block_args = BlockArgs::pending(&mut connection).await?;
-        drop(connection);

         let execution_output = self
             .0
             .executor
             .execute_tx_in_sandbox(
                 vm_permit.clone(),
-                shared_args.clone(),
-                true,
-                TxExecutionArgs::for_validation(&tx),
-                self.0.replica_connection_pool.clone(),
-                tx.clone().into(),
+                setup_args.clone(),
+                TxExecutionArgs::for_validation(tx.clone()),
+                connection,
                 block_args,
                 None,
-                vec![],
+                OneshotTracingParams::default(),
             )
             .await?;
         tracing::info!(
@@ -398,15 +410,16 @@

         let stage_latency =
             SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::VerifyExecute);
+        let connection = self.acquire_replica_connection().await?;
         let computational_gas_limit = self.0.sender_config.validation_computational_gas_limit;
         let validation_result = self
             .0
             .executor
             .validate_tx_in_sandbox(
-                self.0.replica_connection_pool.clone(),
+                connection,
                 vm_permit,
                 tx.clone(),
-                shared_args,
+                setup_args,
                 block_args,
                 computational_gas_limit,
             )
@@ -462,14 +475,23 @@

     /// **Important.** For the main node, this method acquires a DB connection inside `get_batch_fee_input()`.
     /// Thus, you shouldn't call it if you're holding a DB connection already.
-    async fn shared_args(&self) -> anyhow::Result<TxSharedArgs> {
+    async fn call_args(
+        &self,
+        tx: &L2Tx,
+        call_overrides: Option<&CallOverrides>,
+    ) -> anyhow::Result<TxSetupArgs> {
         let fee_input = self
             .0
             .batch_fee_input_provider
             .get_batch_fee_input()
             .await
             .context("cannot get batch fee input")?;
-        Ok(TxSharedArgs {
+        Ok(TxSetupArgs {
+            execution_mode: if call_overrides.is_some() {
+                TxExecutionMode::EthCall
+            } else {
+                TxExecutionMode::VerifyExecute
+            },
             operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr),
             fee_input,
             base_system_contracts: self.0.api_contracts.eth_call.clone(),
@@ -480,6 +502,11 @@
                 .validation_computational_gas_limit,
             chain_id: self.0.sender_config.chain_id,
             whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await,
+            enforced_base_fee: if let Some(overrides) = call_overrides {
+                overrides.enforced_base_fee
+            } else {
+                Some(tx.common_data.fee.max_fee_per_gas.as_u64())
+            },
         })
     }

@@ -696,32 +723,29 @@
             }
         }

-        let shared_args = self.shared_args_for_gas_estimate(fee_model_params).await;
-        let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit;
-        let execution_args =
-            TxExecutionArgs::for_gas_estimate(vm_execution_cache_misses_limit, &tx, base_fee);
+        let setup_args = self.args_for_gas_estimate(fee_model_params, base_fee).await;
+        let execution_args = TxExecutionArgs::for_gas_estimate(tx);
+        let connection = self.acquire_replica_connection().await?;

         let execution_output = self
             .0
             .executor
             .execute_tx_in_sandbox(
                 vm_permit,
-                shared_args,
-                true,
+                setup_args,
                 execution_args,
-                self.0.replica_connection_pool.clone(),
-                tx.clone(),
+                connection,
                 block_args,
                 state_override,
-                vec![],
+                OneshotTracingParams::default(),
             )
             .await?;
         Ok((execution_output.vm, execution_output.metrics))
     }

-    async fn shared_args_for_gas_estimate(&self, fee_input: BatchFeeInput) -> TxSharedArgs {
+    async fn args_for_gas_estimate(&self, fee_input: BatchFeeInput, base_fee: u64) -> TxSetupArgs {
         let config = &self.0.sender_config;
-
-        TxSharedArgs {
+        TxSetupArgs {
+            execution_mode: TxExecutionMode::EstimateFee,
             operator_account: AccountTreeId::new(config.fee_account_addr),
             fee_input,
             // We want to bypass the computation gas limit check for gas estimation
@@ -730,6 +754,7 @@
             caches: self.storage_caches(),
             chain_id: config.chain_id,
             whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await,
+            enforced_base_fee: Some(base_fee),
         }
     }
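The net effect of `call_args` above is that calls take the base fee from `CallOverrides`, while submitted transactions enforce the fee the user signed. A condensed sketch of that decision, with names exactly as in the diff:

let enforced_base_fee = match call_overrides {
    // eth_call-style request: honor an explicit override, or leave `None`
    // so the sandbox uses the base fee of the resolved block.
    Some(overrides) => overrides.enforced_base_fee,
    // Transaction submission: pin the base fee to what the user signed.
    None => Some(tx.common_data.fee.max_fee_per_gas.as_u64()),
};
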
@@ -989,7 +1014,7 @@ impl TxSender {
         .await
     }

-    pub(super) async fn eth_call(
+    pub async fn eth_call(
         &self,
         block_args: BlockArgs,
         call_overrides: CallOverrides,
@@ -998,23 +1023,23 @@
     ) -> Result<Vec<u8>, SubmitTxError> {
         let vm_permit = self.0.vm_concurrency_limiter.acquire().await;
         let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?;
+        let setup_args = self.call_args(&tx, Some(&call_overrides)).await?;

-        let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit;
-        self.0
+        let connection = self.acquire_replica_connection().await?;
+        let result = self
+            .0
             .executor
-            .execute_tx_eth_call(
+            .execute_tx_in_sandbox(
                 vm_permit,
-                self.shared_args().await?,
-                self.0.replica_connection_pool.clone(),
-                call_overrides,
-                tx,
+                setup_args,
+                TxExecutionArgs::for_eth_call(tx),
+                connection,
                 block_args,
-                vm_execution_cache_misses_limit,
-                vec![],
                 state_override,
+                OneshotTracingParams::default(),
             )
-            .await?
-            .into_api_call_result()
+            .await?;
+        result.vm.into_api_call_result()
     }

     pub async fn gas_price(&self) -> anyhow::Result<U256> {
@@ -1067,19 +1092,4 @@
         }
         Ok(())
     }
-
-    pub(crate) async fn get_default_eth_call_gas(
-        &self,
-        block_args: BlockArgs,
-    ) -> anyhow::Result<u64> {
-        let mut connection = self.acquire_replica_connection().await?;
-
-        let protocol_version = block_args
-            .resolve_block_info(&mut connection)
-            .await
-            .context("failed to resolve block info")?
-            .protocol_version;
-
-        Ok(get_eth_call_gas_limit(protocol_version.into()))
-    }
 }
diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs
index 06b6b7a1301b..0ac3eb0b4f38 100644
--- a/core/node/api_server/src/tx_sender/tests.rs
+++ b/core/node/api_server/src/tx_sender/tests.rs
@@ -1,17 +1,18 @@
 //! Tests for the transaction sender.

+use std::time::Duration;
+
 use assert_matches::assert_matches;
 use zksync_multivm::interface::ExecutionResult;
 use zksync_node_fee_model::MockBatchFeeParamsProvider;
 use zksync_node_genesis::{insert_genesis_batch, GenesisParams};
 use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot};
-use zksync_types::{get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog};
+use zksync_types::{api, get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog};
 use zksync_utils::u256_to_h256;
+use zksync_vm_executor::oneshot::MockOneshotExecutor;

 use super::*;
-use crate::{
-    execution_sandbox::testonly::MockTransactionExecutor, web3::testonly::create_test_tx_sender,
-};
+use crate::{execution_sandbox::BlockStartInfo, web3::testonly::create_test_tx_sender};

 #[tokio::test]
 async fn getting_nonce_for_account() {
@@ -31,7 +32,7 @@ async fn getting_nonce_for_account() {
         .await
         .unwrap();

-    let tx_executor = MockTransactionExecutor::default().into();
+    let tx_executor = MockOneshotExecutor::default().into();
     let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await;

     let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap();
@@ -81,7 +82,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() {
         .await;

     let l2_chain_id = L2ChainId::default();
-    let tx_executor = MockTransactionExecutor::default().into();
+    let tx_executor = MockOneshotExecutor::default().into();
     let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await;

     storage
@@ -136,7 +137,7 @@ async fn submitting_tx_requires_one_connection() {
         .unwrap();
     drop(storage);

-    let mut tx_executor = MockTransactionExecutor::default();
+    let mut tx_executor = MockOneshotExecutor::default();
     tx_executor.set_tx_responses(move |received_tx, _| {
         assert_eq!(received_tx.hash(), tx_hash);
         ExecutionResult::Success { output: vec![] }
@@ -155,3 +156,47 @@
         .unwrap()
         .expect("transaction is not persisted");
 }
+
+#[tokio::test]
+async fn eth_call_requires_single_connection() {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let mut storage = pool.connection().await.unwrap();
+    let genesis_params = GenesisParams::mock();
+    insert_genesis_batch(&mut storage, &genesis_params)
+        .await
+        .unwrap();
+    let start_info = BlockStartInfo::new(&mut storage, Duration::MAX)
+        .await
+        .unwrap();
+    let block_id = api::BlockId::Number(api::BlockNumber::Latest);
+    let block_args = BlockArgs::new(&mut storage, block_id, &start_info)
+        .await
+        .unwrap();
+    drop(storage);
+
+    let tx = create_l2_transaction(10, 100);
+    let tx_hash = tx.hash();
+
+    let mut tx_executor = MockOneshotExecutor::default();
+    tx_executor.set_call_responses(move |received_tx, _| {
+        assert_eq!(received_tx.hash(), tx_hash);
+        ExecutionResult::Success {
+            output: b"success!".to_vec(),
+        }
+    });
+    let tx_executor = tx_executor.into();
+    let (tx_sender, _) = create_test_tx_sender(
+        pool.clone(),
+        genesis_params.config().l2_chain_id,
+        tx_executor,
+    )
+    .await;
+
+    let call_overrides = CallOverrides {
+        enforced_base_fee: None,
+    };
+    let output = tx_sender
+        .eth_call(block_args, call_overrides, tx, None)
+        .await
+        .unwrap();
+    assert_eq!(output, b"success!");
+}
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs
index c3e116d39928..de7635263735 100644
--- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs
@@ -19,6 +19,12 @@ impl EnNamespaceServer for EnNamespace {
             .map_err(|err| self.current_method().map_err(err))
     }

+    async fn consensus_global_config(&self) -> RpcResult<Option<en::ConsensusGlobalConfig>> {
+        self.consensus_global_config_impl()
+            .await
+            .map_err(|err| self.current_method().map_err(err))
+    }
+
     async fn consensus_genesis(&self) -> RpcResult<Option<en::ConsensusGenesis>> {
         self.consensus_genesis_impl()
             .await
diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs
index e71f4bd1e1ef..ad00f6a878b9 100644
--- a/core/node/api_server/src/web3/namespaces/debug.rs
+++ b/core/node/api_server/src/web3/namespaces/debug.rs
@@ -1,10 +1,9 @@
-use std::sync::Arc;
-
 use anyhow::Context as _;
-use once_cell::sync::OnceCell;
 use zksync_dal::{CoreDal, DalError};
 use zksync_multivm::{
-    interface::{Call, CallType, ExecutionResult},
+    interface::{
+        Call, CallType, ExecutionResult, OneshotTracingParams, TxExecutionArgs, TxExecutionMode,
+    },
     vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
 };
 use zksync_system_constants::MAX_ENCODED_TX_SIZE;
@@ -19,7 +18,7 @@
 use zksync_web3_decl::error::Web3Error;

 use crate::{
-    execution_sandbox::{ApiTracer, TxSharedArgs},
+    execution_sandbox::TxSetupArgs,
     tx_sender::{ApiContracts, TxSenderConfig},
     web3::{backend_jsonrpsee::MethodTracer, state::RpcState},
 };
@@ -167,29 +166,20 @@ impl DebugNamespace {
             .state
             .resolve_block_args(&mut connection, block_id)
             .await?;
-        drop(connection);
-
         self.current_method().set_block_diff(
             self.state
                 .last_sealed_l2_block
                 .diff_with_block_args(&block_args),
         );

         if request.gas.is_none() {
-            request.gas = Some(
-                self.state
-                    .tx_sender
-                    .get_default_eth_call_gas(block_args)
-                    .await
-                    .map_err(Web3Error::InternalError)?
-                    .into(),
-            )
+            request.gas = Some(block_args.default_eth_call_gas(&mut connection).await?);
         }
+        drop(connection);

         let call_overrides = request.get_call_overrides()?;
         let tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?;

-        let shared_args = self.shared_args().await;
+        let setup_args = self.call_args(call_overrides.enforced_base_fee).await;
         let vm_permit = self
             .state
             .tx_sender
@@ -199,29 +189,25 @@
         let vm_permit = vm_permit.context("cannot acquire VM permit")?;

         // We don't need properly trace if we only need top call
-        let call_tracer_result = Arc::new(OnceCell::default());
-        let custom_tracers = if only_top_call {
-            vec![]
-        } else {
-            vec![ApiTracer::CallTracer(call_tracer_result.clone())]
+        let tracing_params = OneshotTracingParams {
+            trace_calls: !only_top_call,
         };

+        let connection = self.state.acquire_connection().await?;
         let executor = &self.state.tx_sender.0.executor;
         let result = executor
-            .execute_tx_eth_call(
+            .execute_tx_in_sandbox(
                 vm_permit,
-                shared_args,
-                self.state.connection_pool.clone(),
-                call_overrides,
-                tx.clone(),
+                setup_args,
+                TxExecutionArgs::for_eth_call(tx.clone()),
+                connection,
                 block_args,
-                self.sender_config().vm_execution_cache_misses_limit,
-                custom_tracers,
                 None,
+                tracing_params,
             )
             .await?;

-        let (output, revert_reason) = match result.result {
+        let (output, revert_reason) = match result.vm.result {
             ExecutionResult::Success { output, .. } => (output, None),
             ExecutionResult::Revert { output } => (vec![], Some(output.to_string())),
             ExecutionResult::Halt { reason } => {
@@ -232,26 +218,22 @@
             }
         };

-        // We had only one copy of Arc this arc is already dropped it's safe to unwrap
-        let trace = Arc::try_unwrap(call_tracer_result)
-            .unwrap()
-            .take()
-            .unwrap_or_default();
         let call = Call::new_high_level(
             tx.common_data.fee.gas_limit.as_u64(),
-            result.statistics.gas_used,
+            result.vm.statistics.gas_used,
             tx.execute.value,
             tx.execute.calldata,
             output,
             revert_reason,
-            trace,
+            result.call_traces,
         );
         Ok(Self::map_call(call, false))
     }

-    async fn shared_args(&self) -> TxSharedArgs {
+    async fn call_args(&self, enforced_base_fee: Option<u64>) -> TxSetupArgs {
         let sender_config = self.sender_config();
-        TxSharedArgs {
+        TxSetupArgs {
+            execution_mode: TxExecutionMode::EthCall,
             operator_account: AccountTreeId::default(),
             fee_input: self.batch_fee_input,
             base_system_contracts: self.api_contracts.eth_call.clone(),
@@ -263,6 +245,7 @@
                 .tx_sender
                 .read_whitelisted_tokens_for_aa_cache()
                 .await,
+            enforced_base_fee,
         }
     }
 }
diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs
index 604d38ef94ab..26f4aa2b0b5f 100644
--- a/core/node/api_server/src/web3/namespaces/en.rs
+++ b/core/node/api_server/src/web3/namespaces/en.rs
@@ -21,18 +21,35 @@ impl EnNamespace {
         Self { state }
     }

+    pub async fn consensus_global_config_impl(
+        &self,
+    ) -> Result<Option<en::ConsensusGlobalConfig>, Web3Error> {
+        let mut conn = self.state.acquire_connection().await?;
+        let Some(cfg) = conn
+            .consensus_dal()
+            .global_config()
+            .await
+            .context("global_config()")?
+        else {
+            return Ok(None);
+        };
+        Ok(Some(en::ConsensusGlobalConfig(
+            zksync_protobuf::serde::serialize(&cfg, serde_json::value::Serializer).unwrap(),
+        )))
+    }
+
     pub async fn consensus_genesis_impl(&self) -> Result<Option<en::ConsensusGenesis>, Web3Error> {
         let mut conn = self.state.acquire_connection().await?;
-        let Some(genesis) = conn
+        let Some(cfg) = conn
             .consensus_dal()
-            .genesis()
+            .global_config()
             .await
-            .map_err(DalError::generalize)?
+            .context("global_config()")?
         else {
             return Ok(None);
         };
         Ok(Some(en::ConsensusGenesis(
-            zksync_protobuf::serde::serialize(&genesis, serde_json::value::Serializer).unwrap(),
+            zksync_protobuf::serde::serialize(&cfg.genesis, serde_json::value::Serializer).unwrap(),
         )))
     }

@@ -40,7 +57,7 @@
     pub async fn attestation_status_impl(
         &self,
     ) -> Result<Option<en::AttestationStatus>, Web3Error> {
-        let status = self
+        let Some(status) = self
             .state
             .acquire_connection()
             .await?
@@ -54,13 +71,13 @@
             .context("TransactionBuilder::build()")?
             .consensus_dal()
             .attestation_status()
-            .await?;
-
-        Ok(status.map(|s| {
-            en::AttestationStatus(
-                zksync_protobuf::serde::serialize(&s, serde_json::value::Serializer).unwrap(),
-            )
-        }))
+            .await?
+        else {
+            return Ok(None);
+        };
+        Ok(Some(en::AttestationStatus(
+            zksync_protobuf::serde::serialize(&status, serde_json::value::Serializer).unwrap(),
+        )))
     }

     pub(crate) fn current_method(&self) -> &MethodTracer {
@@ -157,7 +174,7 @@
             l1_chain_id: self.state.api_config.l1_chain_id,
             sl_chain_id: Some(self.state.api_config.l1_chain_id.into()),
             l2_chain_id: self.state.api_config.l2_chain_id,
-            recursion_scheduler_level_vk_hash: verifier_config.recursion_scheduler_level_vk_hash,
+            snark_wrapper_vk_hash: verifier_config.snark_wrapper_vk_hash,
             fee_account,
             dummy_verifier: self.state.api_config.dummy_verifier,
             l1_batch_commit_data_generator_mode: self
diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs
index c3bed64a1468..fda5ff6f06be 100644
--- a/core/node/api_server/src/web3/namespaces/eth.rs
+++ b/core/node/api_server/src/web3/namespaces/eth.rs
@@ -70,18 +70,11 @@ impl EthNamespace {
                 .last_sealed_l2_block
                 .diff_with_block_args(&block_args),
         );
-        drop(connection);

         if request.gas.is_none() {
-            request.gas = Some(
-                self.state
-                    .tx_sender
-                    .get_default_eth_call_gas(block_args)
-                    .await
-                    .map_err(Web3Error::InternalError)?
-                    .into(),
-            )
+            request.gas = Some(block_args.default_eth_call_gas(&mut connection).await?);
         }
+        drop(connection);
+
         let call_overrides = request.get_call_overrides()?;
         let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?;

diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs
index 0f8c71aa6281..a77498d4341d 100644
--- a/core/node/api_server/src/web3/testonly.rs
+++ b/core/node/api_server/src/web3/testonly.rs
@@ -2,23 +2,52 @@

 use std::{pin::Pin, time::Instant};

+use async_trait::async_trait;
 use tokio::sync::watch;
 use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig, wallets::Wallets};
 use zksync_dal::ConnectionPool;
 use zksync_health_check::CheckHealth;
-use zksync_node_fee_model::MockBatchFeeParamsProvider;
+use zksync_node_fee_model::{BatchFeeModelInputProvider, MockBatchFeeParamsProvider};
 use zksync_state::PostgresStorageCaches;
-use zksync_types::L2ChainId;
+use zksync_state_keeper::seal_criteria::NoopSealer;
+use zksync_types::{
+    fee_model::{BatchFeeInput, FeeParams},
+    L2ChainId,
+};
+use zksync_vm_executor::oneshot::MockOneshotExecutor;

 use super::{metrics::ApiTransportLabel, *};
-use crate::{
-    execution_sandbox::{testonly::MockTransactionExecutor, TransactionExecutor},
-    tx_sender::TxSenderConfig,
-};
+use crate::{execution_sandbox::TransactionExecutor, tx_sender::TxSenderConfig};

 const TEST_TIMEOUT: Duration = Duration::from_secs(90);
 const POLL_INTERVAL: Duration = Duration::from_millis(50);

+/// Same as [`MockBatchFeeParamsProvider`], but also artificially acquires a Postgres connection on each call
+/// (same as the real node implementation).
+#[derive(Debug)]
+struct MockApiBatchFeeParamsProvider {
+    inner: MockBatchFeeParamsProvider,
+    pool: ConnectionPool<Core>,
+}
+
+#[async_trait]
+impl BatchFeeModelInputProvider for MockApiBatchFeeParamsProvider {
+    async fn get_batch_fee_input_scaled(
+        &self,
+        l1_gas_price_scale_factor: f64,
+        l1_pubdata_price_scale_factor: f64,
+    ) -> anyhow::Result<BatchFeeInput> {
+        let _connection = self.pool.connection().await?;
+        self.inner
+            .get_batch_fee_input_scaled(l1_gas_price_scale_factor, l1_pubdata_price_scale_factor)
+            .await
+    }
+
+    fn get_fee_model_params(&self) -> FeeParams {
+        self.inner.get_fee_model_params()
+    }
+}
+
 pub(crate) async fn create_test_tx_sender(
     pool: ConnectionPool<Core>,
     l2_chain_id: L2ChainId,
@@ -35,7 +64,10 @@ pub(crate) async fn create_test_tx_sender(
     );

     let storage_caches = PostgresStorageCaches::new(1, 1);
-    let batch_fee_model_input_provider = Arc::new(MockBatchFeeParamsProvider::default());
+    let batch_fee_model_input_provider = Arc::new(MockApiBatchFeeParamsProvider {
+        inner: MockBatchFeeParamsProvider::default(),
+        pool: pool.clone(),
+    });
     let (mut tx_sender, vm_barrier) = crate::tx_sender::build_tx_sender(
         &tx_sender_config,
         &web3_config,
@@ -48,7 +80,9 @@
     .await
     .expect("failed building transaction sender");

-    Arc::get_mut(&mut tx_sender.0).unwrap().executor = tx_executor;
+    let tx_sender_inner = Arc::get_mut(&mut tx_sender.0).unwrap();
+    tx_sender_inner.executor = tx_executor;
+    tx_sender_inner.sealer = Arc::new(NoopSealer); // prevents "unexecutable transaction" errors
     (tx_sender, vm_barrier)
 }

@@ -99,7 +133,7 @@ impl ApiServerHandles {
 pub async fn spawn_http_server(
     api_config: InternalApiConfig,
     pool: ConnectionPool<Core>,
-    tx_executor: MockTransactionExecutor,
+    tx_executor: MockOneshotExecutor,
     method_tracer: Arc<MethodTracer>,
     stop_receiver: watch::Receiver<bool>,
 ) -> ApiServerHandles {
@@ -127,7 +161,7 @@ pub async fn spawn_ws_server(
+161,7 @@ pub async fn spawn_ws_server( api_config, pool, websocket_requests_per_minute_limit, - MockTransactionExecutor::default(), + MockOneshotExecutor::default(), Arc::default(), stop_receiver, ) @@ -139,7 +173,7 @@ async fn spawn_server( api_config: InternalApiConfig, pool: ConnectionPool, websocket_requests_per_minute_limit: Option, - tx_executor: MockTransactionExecutor, + tx_executor: MockOneshotExecutor, method_tracer: Arc, stop_receiver: watch::Receiver, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 409eb2004d17..635620e9c525 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -26,9 +26,12 @@ use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, l1_batch_metadata_to_commitment_artifacts, prepare_recovery_snapshot, }; +use zksync_system_constants::{ + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, +}; use zksync_types::{ api, - block::L2BlockHeader, + block::{pack_block_info, L2BlockHeader}, get_nonce_key, l2::L2Tx, storage::get_code_key, @@ -39,6 +42,7 @@ use zksync_types::{ U256, U64, }; use zksync_utils::u256_to_h256; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::{ client::{Client, DynClient, L2}, jsonrpsee::{ @@ -54,10 +58,7 @@ use zksync_web3_decl::{ }; use super::*; -use crate::{ - execution_sandbox::testonly::MockTransactionExecutor, - web3::testonly::{spawn_http_server, spawn_ws_server}, -}; +use crate::web3::testonly::{spawn_http_server, spawn_ws_server}; mod debug; mod filters; @@ -135,8 +136,8 @@ trait HttpTest: Send + Sync { StorageInitialization::Genesis } - fn transaction_executor(&self) -> MockTransactionExecutor { - MockTransactionExecutor::default() + fn transaction_executor(&self) -> MockOneshotExecutor { + MockOneshotExecutor::default() } fn method_tracer(&self) -> Arc { @@ -174,7 +175,7 @@ impl StorageInitialization { } async fn prepare_storage( - &self, + self, network_config: &NetworkConfig, storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { @@ -189,17 +190,33 @@ impl StorageInitialization { insert_genesis_batch(storage, ¶ms).await?; } } - Self::Recovery { logs, factory_deps } => { + Self::Recovery { + mut logs, + factory_deps, + } => { + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let block_info = pack_block_info( + Self::SNAPSHOT_RECOVERY_BLOCK.0.into(), + Self::SNAPSHOT_RECOVERY_BLOCK.0.into(), + ); + logs.push(StorageLog::new_write_log( + l2_block_info_key, + u256_to_h256(block_info), + )); + prepare_recovery_snapshot( storage, Self::SNAPSHOT_RECOVERY_BATCH, Self::SNAPSHOT_RECOVERY_BLOCK, - logs, + &logs, ) .await; storage .factory_deps_dal() - .insert_factory_deps(Self::SNAPSHOT_RECOVERY_BLOCK, factory_deps) + .insert_factory_deps(Self::SNAPSHOT_RECOVERY_BLOCK, &factory_deps) .await?; // Insert the next L1 batch in the storage so that the API server doesn't hang up. @@ -282,7 +299,7 @@ fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { } } -/// Stores L2 block with a single transaction and returns the L2 block header + transaction hash. +/// Stores L2 block and returns the L2 block header. 
async fn store_l2_block( storage: &mut Connection<'_, Core>, number: L2BlockNumber, @@ -298,6 +315,18 @@ async fn store_l2_block( assert_matches!(tx_submission_result, L2TxSubmissionResult::Added); } + // Record L2 block info which is read by the VM sandbox logic + let l2_block_info_key = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let block_info = pack_block_info(number.0.into(), number.0.into()); + let l2_block_log = StorageLog::new_write_log(l2_block_info_key, u256_to_h256(block_info)); + storage + .storage_logs_dal() + .append_storage_logs(number, &[l2_block_log]) + .await?; + let new_l2_block = create_l2_block(number.0); storage.blocks_dal().insert_l2_block(&new_l2_block).await?; storage diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 90e1373a5cc6..d8d1a2c7768e 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -11,6 +11,7 @@ use zksync_types::{ L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, U256, }; use zksync_utils::u256_to_h256; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::namespaces::DebugNamespaceClient; use super::*; @@ -30,15 +31,15 @@ impl CallTest { } } - fn create_executor(only_block: L2BlockNumber) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); - tx_executor.set_call_responses(move |tx, block_args| { + fn create_executor(latest_block: L2BlockNumber) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_call_responses(move |tx, env| { let expected_block_number = match tx.execute.calldata() { - b"pending" => only_block + 1, - b"first" => only_block, + b"pending" => latest_block + 1, + b"latest" => latest_block, data => panic!("Unexpected calldata: {data:?}"), }; - assert_eq!(block_args.resolved_block_number(), expected_block_number); + assert_eq!(env.l1_batch.first_l2_block.number, expected_block_number.0); ExecutionResult::Success { output: b"output".to_vec(), @@ -50,15 +51,20 @@ impl CallTest { #[async_trait] impl HttpTest for CallTest { - fn transaction_executor(&self) -> MockTransactionExecutor { - Self::create_executor(L2BlockNumber(0)) + fn transaction_executor(&self) -> MockOneshotExecutor { + Self::create_executor(L2BlockNumber(1)) } async fn test( &self, client: &DynClient, - _pool: &ConnectionPool, + pool: &ConnectionPool, ) -> anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. 
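// `store_l2_block` (defined above) also seeds the SYSTEM_CONTEXT storage slot
// that the VM sandbox reads. A minimal sketch of that write, assuming
// `pack_block_info` packs the block number and timestamp into a single word
// (the tests above reuse the block number as the timestamp):
//
//     let l2_block_info_key = StorageKey::new(
//         AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
//         SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
//     );
//     let block_info = pack_block_info(number.0.into(), number.0.into());
//     let log = StorageLog::new_write_log(l2_block_info_key, u256_to_h256(block_info));
//     storage.storage_logs_dal().append_storage_logs(number, &[log]).await?;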
+ let mut connection = pool.connection().await?; + store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + drop(connection); + let call_result = client .call(Self::call_request(b"pending"), None, None) .await?; @@ -66,8 +72,8 @@ impl HttpTest for CallTest { let valid_block_numbers_and_calldata = [ (api::BlockNumber::Pending, b"pending" as &[_]), - (api::BlockNumber::Latest, b"first"), - (0.into(), b"first"), + (api::BlockNumber::Latest, b"latest"), + (0.into(), b"latest"), ]; for (number, calldata) in valid_block_numbers_and_calldata { let number = api::BlockIdVariant::BlockNumber(number); @@ -107,7 +113,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { StorageInitialization::empty_recovery() } - fn transaction_executor(&self) -> MockTransactionExecutor { + fn transaction_executor(&self) -> MockOneshotExecutor { let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; CallTest::create_executor(first_local_l2_block) } @@ -146,7 +152,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { for number in first_l2_block_numbers { let number = api::BlockIdVariant::BlockNumber(number); let call_result = client - .call(CallTest::call_request(b"first"), Some(number), None) + .call(CallTest::call_request(b"latest"), Some(number), None) .await?; assert_eq!(call_result.0, b"output"); } @@ -213,16 +219,16 @@ impl HttpTest for SendRawTransactionTest { } } - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); let pending_block = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 } else { L2BlockNumber(1) }; - tx_executor.set_tx_responses(move |tx, block_args| { + tx_executor.set_tx_responses(move |tx, env| { assert_eq!(tx.hash(), Self::transaction_bytes_and_hash().1); - assert_eq!(block_args.resolved_block_number(), pending_block); + assert_eq!(env.l1_batch.first_l2_block.number, pending_block.0); ExecutionResult::Success { output: vec![] } }); tx_executor @@ -311,8 +317,8 @@ impl SendTransactionWithDetailedOutputTest { } #[async_trait] impl HttpTest for SendTransactionWithDetailedOutputTest { - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(); let vm_execution_logs = VmExecutionLogs { storage_logs: self.storage_logs(), @@ -322,9 +328,9 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { total_log_queries_count: 0, }; - tx_executor.set_tx_responses_with_logs(move |tx, block_args| { + tx_executor.set_full_tx_responses(move |tx, env| { assert_eq!(tx.hash(), tx_bytes_and_hash.1); - assert_eq!(block_args.resolved_block_number(), L2BlockNumber(1)); + assert_eq!(env.l1_batch.first_l2_block.number, 1); VmExecutionResultAndLogs { result: ExecutionResult::Success { output: vec![] }, @@ -406,15 +412,20 @@ impl TraceCallTest { #[async_trait] impl HttpTest for TraceCallTest { - fn transaction_executor(&self) -> MockTransactionExecutor { - CallTest::create_executor(L2BlockNumber(0)) + fn transaction_executor(&self) -> MockOneshotExecutor { + CallTest::create_executor(L2BlockNumber(1)) } async fn test( &self, client: &DynClient, - _pool: &ConnectionPool, + pool: &ConnectionPool, ) -> anyhow::Result<()> { + // 
Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. + let mut connection = pool.connection().await?; + store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + drop(connection); + let call_request = CallTest::call_request(b"pending"); let call_result = client.trace_call(call_request.clone(), None, None).await?; Self::assert_debug_call(&call_request, &call_result); @@ -424,13 +435,9 @@ impl HttpTest for TraceCallTest { .await?; Self::assert_debug_call(&call_request, &call_result); - let genesis_block_numbers = [ - api::BlockNumber::Earliest, - api::BlockNumber::Latest, - 0.into(), - ]; - let call_request = CallTest::call_request(b"first"); - for number in genesis_block_numbers { + let latest_block_numbers = [api::BlockNumber::Latest, 1.into()]; + let call_request = CallTest::call_request(b"latest"); + for number in latest_block_numbers { let call_result = client .trace_call( call_request.clone(), @@ -474,7 +481,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { StorageInitialization::empty_recovery() } - fn transaction_executor(&self) -> MockTransactionExecutor { + fn transaction_executor(&self) -> MockOneshotExecutor { let number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; CallTest::create_executor(number) } @@ -504,7 +511,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { assert_pruned_block_error(&error, first_local_l2_block); } - let call_request = CallTest::call_request(b"first"); + let call_request = CallTest::call_request(b"latest"); let first_l2_block_numbers = [api::BlockNumber::Latest, first_local_l2_block.0.into()]; for number in first_l2_block_numbers { let number = api::BlockId::Number(number); @@ -544,18 +551,18 @@ impl HttpTest for EstimateGasTest { SendRawTransactionTest { snapshot_recovery }.storage_initialization() } - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut tx_executor = MockOneshotExecutor::default(); let pending_block_number = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 } else { L2BlockNumber(1) }; let gas_limit_threshold = self.gas_limit_threshold.clone(); - tx_executor.set_call_responses(move |tx, block_args| { + tx_executor.set_tx_responses(move |tx, env| { assert_eq!(tx.execute.calldata(), [] as [u8; 0]); assert_eq!(tx.nonce(), Some(Nonce(0))); - assert_eq!(block_args.resolved_block_number(), pending_block_number); + assert_eq!(env.l1_batch.first_l2_block.number, pending_block_number.0); let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst); if tx.gas_limit() >= U256::from(gas_limit_threshold) { @@ -637,49 +644,17 @@ async fn estimate_gas_after_snapshot_recovery() { #[derive(Debug)] struct EstimateGasWithStateOverrideTest { - gas_limit_threshold: Arc, - snapshot_recovery: bool, -} - -impl EstimateGasWithStateOverrideTest { - fn new(snapshot_recovery: bool) -> Self { - Self { - gas_limit_threshold: Arc::default(), - snapshot_recovery, - } - } + inner: EstimateGasTest, } #[async_trait] impl HttpTest for EstimateGasWithStateOverrideTest { fn storage_initialization(&self) -> StorageInitialization { - let snapshot_recovery = self.snapshot_recovery; - SendRawTransactionTest { snapshot_recovery }.storage_initialization() + self.inner.storage_initialization() } - fn transaction_executor(&self) -> MockTransactionExecutor { - let mut tx_executor = MockTransactionExecutor::default(); - let 
pending_block_number = if self.snapshot_recovery { - StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 2 - } else { - L2BlockNumber(1) - }; - let gas_limit_threshold = self.gas_limit_threshold.clone(); - tx_executor.set_call_responses(move |tx, block_args| { - assert_eq!(tx.execute.calldata(), [] as [u8; 0]); - assert_eq!(tx.nonce(), Some(Nonce(0))); - assert_eq!(block_args.resolved_block_number(), pending_block_number); - - let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst); - if tx.gas_limit() >= U256::from(gas_limit_threshold) { - ExecutionResult::Success { output: vec![] } - } else { - ExecutionResult::Revert { - output: VmRevertReason::VmError, - } - } - }); - tx_executor + fn transaction_executor(&self) -> MockOneshotExecutor { + self.inner.transaction_executor() } async fn test( @@ -735,5 +710,6 @@ impl HttpTest for EstimateGasWithStateOverrideTest { #[tokio::test] async fn estimate_gas_with_state_override() { - test_http_server(EstimateGasWithStateOverrideTest::new(false)).await; + let inner = EstimateGasTest::new(false); + test_http_server(EstimateGasWithStateOverrideTest { inner }).await; } diff --git a/core/node/base_token_adjuster/Cargo.toml b/core/node/base_token_adjuster/Cargo.toml index 3a0beb2ea137..9dcf5d796530 100644 --- a/core/node/base_token_adjuster/Cargo.toml +++ b/core/node/base_token_adjuster/Cargo.toml @@ -21,6 +21,7 @@ zksync_eth_client.workspace = true zksync_node_fee_model.workspace = true zksync_utils.workspace = true vise.workspace = true +bigdecimal.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs new file mode 100644 index 000000000000..0199b06ebd69 --- /dev/null +++ b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs @@ -0,0 +1,331 @@ +use std::{ + cmp::max, + ops::{Div, Mul}, + sync::Arc, + time::Instant, +}; + +use anyhow::Context; +use bigdecimal::{num_bigint::ToBigInt, BigDecimal, Zero}; +use zksync_config::BaseTokenAdjusterConfig; +use zksync_eth_client::{BoundEthInterface, CallFunctionArgs, Options}; +use zksync_node_fee_model::l1_gas_price::TxParamsProvider; +use zksync_types::{ + base_token_ratio::BaseTokenAPIRatio, + ethabi::{Contract, Token}, + web3::{contract::Tokenize, BlockNumber}, + Address, U256, +}; + +use crate::metrics::{OperationResult, OperationResultLabels, METRICS}; + +#[derive(Debug, Clone)] +pub struct UpdateOnL1Params { + pub eth_client: Box<dyn BoundEthInterface>, + pub gas_adjuster: Arc<dyn TxParamsProvider>, + pub token_multiplier_setter_account_address: Address, + pub chain_admin_contract: Contract, + pub getters_facet_contract: Contract, + pub diamond_proxy_contract_address: Address, + pub chain_admin_contract_address: Option<Address>
, + pub config: BaseTokenAdjusterConfig, +} + +#[derive(Debug, Clone)] +pub enum BaseTokenL1Behaviour { + UpdateOnL1 { + params: UpdateOnL1Params, + last_persisted_l1_ratio: Option<BigDecimal>, + }, + NoOp, +} + +impl BaseTokenL1Behaviour { + pub async fn update_l1(&mut self, new_ratio: BaseTokenAPIRatio) -> anyhow::Result<()> { + let (l1_params, last_persisted_l1_ratio) = match self { + BaseTokenL1Behaviour::UpdateOnL1 { + ref params, + ref last_persisted_l1_ratio, + } => (&params.clone(), last_persisted_l1_ratio), + BaseTokenL1Behaviour::NoOp => return Ok(()), + }; + + let prev_ratio = if let Some(prev_ratio) = last_persisted_l1_ratio { + prev_ratio.clone() + } else { + let prev_ratio = self.get_current_ratio_from_l1(l1_params).await?; + self.update_last_persisted_l1_ratio(prev_ratio.clone()); + tracing::info!( + "Fetched current base token ratio from the L1: {}", + prev_ratio.to_bigint().unwrap() + ); + prev_ratio + }; + + let current_ratio = BigDecimal::from(new_ratio.numerator.get()) + .div(BigDecimal::from(new_ratio.denominator.get())); + let deviation = Self::compute_deviation(prev_ratio.clone(), current_ratio.clone()); + + if deviation < BigDecimal::from(l1_params.config.l1_update_deviation_percentage) { + tracing::debug!( + "Skipping L1 update. current_ratio {}, previous_ratio {}, deviation {}", + current_ratio, + prev_ratio, + deviation.to_bigint().unwrap() + ); + return Ok(()); + } + + let max_attempts = l1_params.config.l1_tx_sending_max_attempts; + let sleep_duration = l1_params.config.l1_tx_sending_sleep_duration(); + let mut prev_base_fee_per_gas: Option<u64> = None; + let mut prev_priority_fee_per_gas: Option<u64> = None; + let mut last_error = None; + for attempt in 0..max_attempts { + let (base_fee_per_gas, priority_fee_per_gas) = + self.get_eth_fees(l1_params, prev_base_fee_per_gas, prev_priority_fee_per_gas); + + let start_time = Instant::now(); + let result = self + .do_update_l1(l1_params, new_ratio, base_fee_per_gas, priority_fee_per_gas) + .await; + + match result { + Ok(x) => { + tracing::info!( + "Updated base token multiplier on L1: numerator {}, denominator {}, base_fee_per_gas {}, priority_fee_per_gas {}, deviation {}", + new_ratio.numerator.get(), + new_ratio.denominator.get(), + base_fee_per_gas, + priority_fee_per_gas, + deviation.to_bigint().unwrap() + ); + METRICS + .l1_gas_used + .set(x.unwrap_or(U256::zero()).low_u128() as u64); + METRICS.l1_update_latency[&OperationResultLabels { + result: OperationResult::Success, + }] + .observe(start_time.elapsed()); + self.update_last_persisted_l1_ratio( + BigDecimal::from(new_ratio.numerator.get()) + .div(BigDecimal::from(new_ratio.denominator.get())), + ); + + return Ok(()); + } + Err(err) => { + tracing::info!( + "Failed to update base token multiplier on L1, attempt {}, base_fee_per_gas {}, priority_fee_per_gas {}: {}", + attempt, + base_fee_per_gas, + priority_fee_per_gas, + err + ); + METRICS.l1_update_latency[&OperationResultLabels { + result: OperationResult::Failure, + }] + .observe(start_time.elapsed()); + + tokio::time::sleep(sleep_duration).await; + prev_base_fee_per_gas = Some(base_fee_per_gas); + prev_priority_fee_per_gas = Some(priority_fee_per_gas); + last_error = Some(err) + } + } + } + + let error_message = "Failed to update base token multiplier on L1"; + Err(last_error + .map(|x| x.context(error_message)) + .unwrap_or_else(|| anyhow::anyhow!(error_message))) + } + + fn update_last_persisted_l1_ratio(&mut self, new_ratio: BigDecimal) { + match self { + BaseTokenL1Behaviour::UpdateOnL1 { + params: _, + ref mut
last_persisted_l1_ratio, + } => *last_persisted_l1_ratio = Some(new_ratio), + BaseTokenL1Behaviour::NoOp => {} + }; + } + + async fn do_update_l1( + &self, + l1_params: &UpdateOnL1Params, + api_ratio: BaseTokenAPIRatio, + base_fee_per_gas: u64, + priority_fee_per_gas: u64, + ) -> anyhow::Result<Option<U256>> { + let fn_set_token_multiplier = l1_params + .chain_admin_contract + .function("setTokenMultiplier") + .context("`setTokenMultiplier` function must be present in the ChainAdmin contract")?; + + let calldata = fn_set_token_multiplier + .encode_input( + &( + Token::Address(l1_params.diamond_proxy_contract_address), + Token::Uint(api_ratio.numerator.get().into()), + Token::Uint(api_ratio.denominator.get().into()), + ) + .into_tokens(), + ) + .context("failed encoding `setTokenMultiplier` input")?; + + let nonce = (*l1_params.eth_client) + .as_ref() + .nonce_at_for_account( + l1_params.token_multiplier_setter_account_address, + BlockNumber::Latest, + ) + .await + .with_context(|| "failed getting transaction count")? + .as_u64(); + + let options = Options { + gas: Some(U256::from(l1_params.config.max_tx_gas)), + nonce: Some(U256::from(nonce)), + max_fee_per_gas: Some(U256::from(base_fee_per_gas + priority_fee_per_gas)), + max_priority_fee_per_gas: Some(U256::from(priority_fee_per_gas)), + ..Default::default() + }; + + let signed_tx = l1_params + .eth_client + .sign_prepared_tx_for_addr( + calldata, + l1_params.chain_admin_contract_address.unwrap(), + options, + ) + .await + .context("cannot sign a `setTokenMultiplier` transaction")?; + + let hash = (*l1_params.eth_client) + .as_ref() + .send_raw_tx(signed_tx.raw_tx) + .await + .context("failed sending `setTokenMultiplier` transaction")?; + + let max_attempts = l1_params.config.l1_receipt_checking_max_attempts; + let sleep_duration = l1_params.config.l1_receipt_checking_sleep_duration(); + for _i in 0..max_attempts { + let maybe_receipt = (*l1_params.eth_client) + .as_ref() + .tx_receipt(hash) + .await + .context("failed getting receipt for `setTokenMultiplier` transaction")?; + if let Some(receipt) = maybe_receipt { + if receipt.status == Some(1.into()) { + return Ok(receipt.gas_used); + } + return Err(anyhow::Error::msg(format!( + "`setTokenMultiplier` transaction {:?} failed with status {:?}", + hex::encode(hash), + receipt.status + ))); + } else { + tokio::time::sleep(sleep_duration).await; + } + } + + Err(anyhow::Error::msg(format!( + "Unable to retrieve `setTokenMultiplier` transaction status in {} attempts", + max_attempts + ))) + } + + async fn get_current_ratio_from_l1( + &self, + l1_params: &UpdateOnL1Params, + ) -> anyhow::Result<BigDecimal> { + let numerator: U256 = CallFunctionArgs::new("baseTokenGasPriceMultiplierNominator", ()) + .for_contract( + l1_params.diamond_proxy_contract_address, + &l1_params.getters_facet_contract, + ) + .call((*l1_params.eth_client).as_ref()) + .await?; + let denominator: U256 = CallFunctionArgs::new("baseTokenGasPriceMultiplierDenominator", ()) + .for_contract( + l1_params.diamond_proxy_contract_address, + &l1_params.getters_facet_contract, + ) + .call((*l1_params.eth_client).as_ref()) + .await?; + Ok(BigDecimal::from(numerator.as_u128()).div(BigDecimal::from(denominator.as_u128()))) + } + + fn get_eth_fees( + &self, + l1_params: &UpdateOnL1Params, + prev_base_fee_per_gas: Option<u64>, + prev_priority_fee_per_gas: Option<u64>, + ) -> (u64, u64) { + // Use get_blob_tx_base_fee here instead of get_base_fee to optimise for fast inclusion. + // get_base_fee might cause the transaction to be stuck in the mempool for 10+ minutes.
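// The bumping rule implemented below, distilled into a standalone sketch.
// It assumes the usual mempool replacement requirements that the comments in
// this function refer to: a replacement transaction must raise the priority
// fee by at least 20% and the base fee by at least 10%:
//
//     fn bumped_fees(base: u64, priority: u64, prev: Option<(u64, u64)>) -> (u64, u64) {
//         match prev {
//             None => (base, priority),
//             Some((prev_base, prev_priority)) => (
//                 // at least +10% (rounded up) over the previous base fee
//                 base.max(prev_base + prev_base / 10 + 1),
//                 // at least +20% (rounded up) over the previous priority fee
//                 priority.max((prev_priority * 6) / 5 + 1),
//             ),
//         }
//     }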
+ let mut base_fee_per_gas = l1_params.gas_adjuster.as_ref().get_blob_tx_base_fee(); + let mut priority_fee_per_gas = l1_params.gas_adjuster.as_ref().get_priority_fee(); + if let Some(x) = prev_priority_fee_per_gas { + // Increase `priority_fee_per_gas` by at least 20% to prevent "replacement transaction under-priced" error. + priority_fee_per_gas = max(priority_fee_per_gas, (x * 6) / 5 + 1); + } + + if let Some(x) = prev_base_fee_per_gas { + // same for base_fee_per_gas but 10% + base_fee_per_gas = max(base_fee_per_gas, x + (x / 10) + 1); + } + + // Extra check to prevent sending transaction with extremely high priority fee. + if priority_fee_per_gas > l1_params.config.max_acceptable_priority_fee_in_gwei { + panic!( + "Extremely high value of priority_fee_per_gas is suggested: {}, while max acceptable is {}", + priority_fee_per_gas, + l1_params.config.max_acceptable_priority_fee_in_gwei + ); + } + + (base_fee_per_gas, priority_fee_per_gas) + } + + fn compute_deviation(prev: BigDecimal, next: BigDecimal) -> BigDecimal { + if prev.eq(&BigDecimal::zero()) { + return BigDecimal::from(100); + } + + (prev.clone() - next.clone()) + .abs() + .div(prev.clone()) + .mul(BigDecimal::from(100)) + } +} + +#[cfg(test)] +mod tests { + use std::ops::Div; + + use bigdecimal::{BigDecimal, Zero}; + + use crate::base_token_l1_behaviour::BaseTokenL1Behaviour; + + #[test] + fn test_compute_deviation() { + let prev_ratio = BigDecimal::from(4); + let current_ratio = BigDecimal::from(5); + let deviation = + BaseTokenL1Behaviour::compute_deviation(prev_ratio.clone(), current_ratio.clone()); + assert_eq!(deviation, BigDecimal::from(25)); + + let deviation = BaseTokenL1Behaviour::compute_deviation(current_ratio, prev_ratio); + assert_eq!(deviation, BigDecimal::from(20)); + } + + #[test] + fn test_compute_deviation_when_prev_is_zero() { + let prev_ratio = BigDecimal::zero(); + let current_ratio = BigDecimal::from(1).div(BigDecimal::from(2)); + let deviation = BaseTokenL1Behaviour::compute_deviation(prev_ratio, current_ratio); + assert_eq!(deviation, BigDecimal::from(100)); + } +} diff --git a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs index 12cd6233efbb..220f100e5dcb 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs @@ -1,30 +1,16 @@ -use std::{cmp::max, fmt::Debug, sync::Arc, time::Instant}; +use std::{fmt::Debug, sync::Arc, time::Instant}; use anyhow::Context as _; use tokio::{sync::watch, time::sleep}; use zksync_config::configs::base_token_adjuster::BaseTokenAdjusterConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_eth_client::{BoundEthInterface, Options}; use zksync_external_price_api::PriceAPIClient; -use zksync_node_fee_model::l1_gas_price::TxParamsProvider; -use zksync_types::{ - base_token_ratio::BaseTokenAPIRatio, - ethabi::{Contract, Token}, - web3::{contract::Tokenize, BlockNumber}, - Address, U256, -}; - -use crate::metrics::{OperationResult, OperationResultLabels, METRICS}; +use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; -#[derive(Debug, Clone)] -pub struct BaseTokenRatioPersisterL1Params { - pub eth_client: Box<dyn BoundEthInterface>, - pub gas_adjuster: Arc<dyn TxParamsProvider>, - pub token_multiplier_setter_account_address: Address, - pub chain_admin_contract: Contract, - pub diamond_proxy_contract_address: Address, - pub chain_admin_contract_address: Option<Address>
, -} +use crate::{ + base_token_l1_behaviour::BaseTokenL1Behaviour, + metrics::{OperationResult, OperationResultLabels, METRICS}, +}; #[derive(Debug, Clone)] pub struct BaseTokenRatioPersister { @@ -32,7 +18,7 @@ pub struct BaseTokenRatioPersister { config: BaseTokenAdjusterConfig, base_token_address: Address, price_api_client: Arc, - l1_params: Option, + l1_behaviour: BaseTokenL1Behaviour, } impl BaseTokenRatioPersister { @@ -42,14 +28,14 @@ impl BaseTokenRatioPersister { config: BaseTokenAdjusterConfig, base_token_address: Address, price_api_client: Arc, - l1_params: Option, + l1_behaviour: BaseTokenL1Behaviour, ) -> Self { Self { pool, config, base_token_address, price_api_client, - l1_params, + l1_behaviour, } } @@ -80,108 +66,11 @@ impl BaseTokenRatioPersister { Ok(()) } - async fn loop_iteration(&self) -> anyhow::Result<()> { + async fn loop_iteration(&mut self) -> anyhow::Result<()> { // TODO(PE-148): Consider shifting retry upon adding external API redundancy. let new_ratio = self.retry_fetch_ratio().await?; self.persist_ratio(new_ratio).await?; - self.retry_update_ratio_on_l1(new_ratio).await - } - - fn get_eth_fees( - &self, - l1_params: &BaseTokenRatioPersisterL1Params, - prev_base_fee_per_gas: Option, - prev_priority_fee_per_gas: Option, - ) -> (u64, u64) { - // Use get_blob_tx_base_fee here instead of get_base_fee to optimise for fast inclusion. - // get_base_fee might cause the transaction to be stuck in the mempool for 10+ minutes. - let mut base_fee_per_gas = l1_params.gas_adjuster.as_ref().get_blob_tx_base_fee(); - let mut priority_fee_per_gas = l1_params.gas_adjuster.as_ref().get_priority_fee(); - if let Some(x) = prev_priority_fee_per_gas { - // Increase `priority_fee_per_gas` by at least 20% to prevent "replacement transaction under-priced" error. - priority_fee_per_gas = max(priority_fee_per_gas, (x * 6) / 5 + 1); - } - - if let Some(x) = prev_base_fee_per_gas { - // same for base_fee_per_gas but 10% - base_fee_per_gas = max(base_fee_per_gas, x + (x / 10) + 1); - } - - // Extra check to prevent sending transaction will extremely high priority fee. 
- if priority_fee_per_gas > self.config.max_acceptable_priority_fee_in_gwei { - panic!( - "Extremely high value of priority_fee_per_gas is suggested: {}, while max acceptable is {}", - priority_fee_per_gas, - self.config.max_acceptable_priority_fee_in_gwei - ); - } - - (base_fee_per_gas, priority_fee_per_gas) - } - - async fn retry_update_ratio_on_l1(&self, new_ratio: BaseTokenAPIRatio) -> anyhow::Result<()> { - let Some(l1_params) = &self.l1_params else { - return Ok(()); - }; - - let max_attempts = self.config.l1_tx_sending_max_attempts; - let sleep_duration = self.config.l1_tx_sending_sleep_duration(); - let mut prev_base_fee_per_gas: Option = None; - let mut prev_priority_fee_per_gas: Option = None; - let mut last_error = None; - for attempt in 0..max_attempts { - let (base_fee_per_gas, priority_fee_per_gas) = - self.get_eth_fees(l1_params, prev_base_fee_per_gas, prev_priority_fee_per_gas); - - let start_time = Instant::now(); - let result = self - .update_ratio_on_l1(l1_params, new_ratio, base_fee_per_gas, priority_fee_per_gas) - .await; - - match result { - Ok(x) => { - tracing::info!( - "Updated base token multiplier on L1: numerator {}, denominator {}, base_fee_per_gas {}, priority_fee_per_gas {}", - new_ratio.numerator.get(), - new_ratio.denominator.get(), - base_fee_per_gas, - priority_fee_per_gas - ); - METRICS - .l1_gas_used - .set(x.unwrap_or(U256::zero()).low_u128() as u64); - METRICS.l1_update_latency[&OperationResultLabels { - result: OperationResult::Success, - }] - .observe(start_time.elapsed()); - - return Ok(()); - } - Err(err) => { - tracing::info!( - "Failed to update base token multiplier on L1, attempt {}, base_fee_per_gas {}, priority_fee_per_gas {}: {}", - attempt, - base_fee_per_gas, - priority_fee_per_gas, - err - ); - METRICS.l1_update_latency[&OperationResultLabels { - result: OperationResult::Failure, - }] - .observe(start_time.elapsed()); - - tokio::time::sleep(sleep_duration).await; - prev_base_fee_per_gas = Some(base_fee_per_gas); - prev_priority_fee_per_gas = Some(priority_fee_per_gas); - last_error = Some(err) - } - } - } - - let error_message = "Failed to update base token multiplier on L1"; - Err(last_error - .map(|x| x.context(error_message)) - .unwrap_or_else(|| anyhow::anyhow!(error_message))) + self.l1_behaviour.update_l1(new_ratio).await } async fn retry_fetch_ratio(&self) -> anyhow::Result { @@ -244,89 +133,4 @@ impl BaseTokenRatioPersister { Ok(id) } - - async fn update_ratio_on_l1( - &self, - l1_params: &BaseTokenRatioPersisterL1Params, - api_ratio: BaseTokenAPIRatio, - base_fee_per_gas: u64, - priority_fee_per_gas: u64, - ) -> anyhow::Result> { - let fn_set_token_multiplier = l1_params - .chain_admin_contract - .function("setTokenMultiplier") - .context("`setTokenMultiplier` function must be present in the ChainAdmin contract")?; - - let calldata = fn_set_token_multiplier - .encode_input( - &( - Token::Address(l1_params.diamond_proxy_contract_address), - Token::Uint(api_ratio.numerator.get().into()), - Token::Uint(api_ratio.denominator.get().into()), - ) - .into_tokens(), - ) - .context("failed encoding `setTokenMultiplier` input")?; - - let nonce = (*l1_params.eth_client) - .as_ref() - .nonce_at_for_account( - l1_params.token_multiplier_setter_account_address, - BlockNumber::Pending, - ) - .await - .with_context(|| "failed getting transaction count")? 
- .as_u64(); - - let options = Options { - gas: Some(U256::from(self.config.max_tx_gas)), - nonce: Some(U256::from(nonce)), - max_fee_per_gas: Some(U256::from(base_fee_per_gas + priority_fee_per_gas)), - max_priority_fee_per_gas: Some(U256::from(priority_fee_per_gas)), - ..Default::default() - }; - - let signed_tx = l1_params - .eth_client - .sign_prepared_tx_for_addr( - calldata, - l1_params.chain_admin_contract_address.unwrap(), - options, - ) - .await - .context("cannot sign a `setTokenMultiplier` transaction")?; - - let hash = (*l1_params.eth_client) - .as_ref() - .send_raw_tx(signed_tx.raw_tx) - .await - .context("failed sending `setTokenMultiplier` transaction")?; - - let max_attempts = self.config.l1_receipt_checking_max_attempts; - let sleep_duration = self.config.l1_receipt_checking_sleep_duration(); - for _i in 0..max_attempts { - let maybe_receipt = (*l1_params.eth_client) - .as_ref() - .tx_receipt(hash) - .await - .context("failed getting receipt for `setTokenMultiplier` transaction")?; - if let Some(receipt) = maybe_receipt { - if receipt.status == Some(1.into()) { - return Ok(receipt.gas_used); - } - return Err(anyhow::Error::msg(format!( - "`setTokenMultiplier` transaction {:?} failed with status {:?}", - hex::encode(hash), - receipt.status - ))); - } else { - tokio::time::sleep(sleep_duration).await; - } - } - - Err(anyhow::Error::msg(format!( - "Unable to retrieve `setTokenMultiplier` transaction status in {} attempts", - max_attempts - ))) - } } diff --git a/core/node/base_token_adjuster/src/lib.rs b/core/node/base_token_adjuster/src/lib.rs index d786b440f622..ddfad6ea8c92 100644 --- a/core/node/base_token_adjuster/src/lib.rs +++ b/core/node/base_token_adjuster/src/lib.rs @@ -1,8 +1,10 @@ pub use self::{ - base_token_ratio_persister::{BaseTokenRatioPersister, BaseTokenRatioPersisterL1Params}, + base_token_l1_behaviour::{BaseTokenL1Behaviour, UpdateOnL1Params}, + base_token_ratio_persister::BaseTokenRatioPersister, base_token_ratio_provider::{DBBaseTokenRatioProvider, NoOpRatioProvider}, }; +mod base_token_l1_behaviour; mod base_token_ratio_persister; mod base_token_ratio_provider; mod metrics; diff --git a/core/node/base_token_adjuster/src/metrics.rs b/core/node/base_token_adjuster/src/metrics.rs index e6f6571adc1d..d84e4da0c0c7 100644 --- a/core/node/base_token_adjuster/src/metrics.rs +++ b/core/node/base_token_adjuster/src/metrics.rs @@ -15,7 +15,7 @@ pub(crate) struct OperationResultLabels { } #[derive(Debug, Metrics)] -#[metrics(prefix = "snapshots_creator")] +#[metrics(prefix = "base_token_adjuster")] pub(crate) struct BaseTokenAdjusterMetrics { pub l1_gas_used: Gauge, #[metrics(buckets = Buckets::LATENCIES)] diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index e82969dae6c6..707bd957d810 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -20,6 +20,7 @@ zksync_consensus_storage.workspace = true zksync_consensus_executor.workspace = true zksync_consensus_bft.workspace = true zksync_consensus_utils.workspace = true +zksync_contracts.workspace = true zksync_protobuf.workspace = true zksync_dal.workspace = true zksync_l1_contract_interface.workspace = true @@ -31,22 +32,24 @@ zksync_system_constants.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_web3_decl.workspace = true - +zksync_node_api_server.workspace = true +zksync_state.workspace = true +zksync_vm_interface.workspace = true anyhow.workspace = true async-trait.workspace = true secrecy.workspace = true 
tempfile.workspace = true thiserror.workspace = true tracing.workspace = true +hex.workspace = true tokio.workspace = true +semver.workspace = true [dev-dependencies] zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true zksync_node_api_server.workspace = true zksync_test_account.workspace = true -zksync_contracts.workspace = true -tokio.workspace = true test-casing.workspace = true rand.workspace = true diff --git a/core/node/consensus/src/abi.rs b/core/node/consensus/src/abi.rs new file mode 100644 index 000000000000..0e2200e28038 --- /dev/null +++ b/core/node/consensus/src/abi.rs @@ -0,0 +1,133 @@ +//! Strongly-typed API for Consensus-related solidity contracts. +//! Placeholder until we can depend on alloy_sol_types. +use anyhow::Context as _; +use zksync_types::{ethabi, ethabi::Token}; + +/// Strongly typed representation of a contract function. +/// It also represents the inputs of the function. +pub trait Function { + /// Name of the solidity function. + const NAME: &'static str; + /// Type representing contract this function belongs to. + type Contract: AsRef<ethabi::Contract>; + /// Type representing outputs of this function. + type Outputs; + /// Encodes this struct to inputs of this function. + fn encode(&self) -> Vec<Token>; + /// Decodes outputs of this function. + fn decode_outputs(outputs: Vec<Token>) -> anyhow::Result<Self::Outputs>; +} + +/// Address of contract C. It is a thin wrapper around ethabi::Address that +/// additionally indicates which contract is deployed at this address. +#[derive(Debug)] +pub struct Address<C>(ethabi::Address, std::marker::PhantomData<C>); + +impl<C> Clone for Address<C> { + fn clone(&self) -> Self { + *self + } +} + +impl<C> Copy for Address<C> {} + +impl<C> PartialEq for Address<C> { + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl<C> Eq for Address<C> {} + +impl<C> Address<C> { + pub fn new(address: ethabi::Address) -> Self { + Self(address, std::marker::PhantomData) + } +} + +impl<C> std::ops::Deref for Address<C> { + type Target = ethabi::Address; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Represents a call to the function F. +#[derive(Debug)] +pub struct Call<F: Function> { + /// Contract of the function. + pub contract: F::Contract, + /// Inputs to the function. + pub inputs: F, +} + +impl<F: Function> Call<F> { + pub(super) fn function(&self) -> &ethabi::Function { + self.contract.as_ref().function(F::NAME).unwrap() + } + /// Converts the call to raw calldata. + pub fn calldata(&self) -> ethabi::Result<ethabi::Bytes> { + self.function().encode_input(&self.inputs.encode()) + } + /// Parses the outputs of the call.
+ pub fn decode_outputs(&self, outputs: &[u8]) -> anyhow::Result<F::Outputs> { + F::decode_outputs( + self.function() + .decode_output(outputs) + .context("decode_output()")?, + ) + } +} + +pub(crate) fn into_fixed_bytes<const N: usize>(t: Token) -> anyhow::Result<[u8; N]> { + match t { + Token::FixedBytes(b) => b.try_into().ok().context("bad size"), + bad => anyhow::bail!("want fixed_bytes, got {bad:?}"), + } +} + +pub(crate) fn into_tuple<const N: usize>(t: Token) -> anyhow::Result<[Token; N]> { + match t { + Token::Tuple(ts) => ts.try_into().ok().context("bad size"), + bad => anyhow::bail!("want tuple, got {bad:?}"), + } +} + +pub(crate) fn into_uint<I: TryFrom<ethabi::Uint>>(t: Token) -> anyhow::Result<I> { + match t { + Token::Uint(i) => i.try_into().ok().context("overflow"), + bad => anyhow::bail!("want uint, got {bad:?}"), + } +} + +#[cfg(test)] +fn example(t: &ethabi::ParamType) -> Token { + use ethabi::ParamType as T; + match t { + T::Address => Token::Address(ethabi::Address::default()), + T::Bytes => Token::Bytes(ethabi::Bytes::default()), + T::Int(_) => Token::Int(ethabi::Int::default()), + T::Uint(_) => Token::Uint(ethabi::Uint::default()), + T::Bool => Token::Bool(bool::default()), + T::String => Token::String(String::default()), + T::Array(t) => Token::Array(vec![example(t)]), + T::FixedBytes(n) => Token::FixedBytes(vec![0; *n]), + T::FixedArray(t, n) => Token::FixedArray(vec![example(t); *n]), + T::Tuple(ts) => Token::Tuple(ts.iter().map(example).collect()), + } +} + +#[cfg(test)] +impl<F: Function> Call<F> { + pub(crate) fn test(&self) -> anyhow::Result<()> { + self.calldata().context("calldata()")?; + F::decode_outputs( + self.function() + .outputs + .iter() + .map(|p| example(&p.kind)) + .collect(), + )?; + Ok(()) + } +} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index c2fa13472066..22f8fc01192f 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -11,6 +11,8 @@ use zksync_config::{ use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_executor as executor; use zksync_consensus_roles::{attester, node, validator}; +use zksync_dal::consensus_dal; +use zksync_types::ethabi; fn read_secret_text<T: TextFmt>(text: Option<&Secret<String>>) -> anyhow::Result<Option<T>> { text.map(|text| Text::new(text.expose_secret()).decode()) @@ -41,16 +43,18 @@ pub(super) struct GenesisSpec { pub(super) validators: validator::Committee, pub(super) attesters: Option<attester::Committee>, pub(super) leader_selection: validator::LeaderSelectionMode, + pub(super) registry_address: Option<ethabi::Address>, } impl GenesisSpec { - pub(super) fn from_genesis(g: &validator::Genesis) -> Self { + pub(super) fn from_global_config(cfg: &consensus_dal::GlobalConfig) -> Self { Self { - chain_id: g.chain_id, - protocol_version: g.protocol_version, - validators: g.validators.clone(), - attesters: g.attesters.clone(), - leader_selection: g.leader_selection.clone(), + chain_id: cfg.genesis.chain_id, + protocol_version: cfg.genesis.protocol_version, + validators: cfg.genesis.validators.clone(), + attesters: cfg.genesis.attesters.clone(), + leader_selection: cfg.genesis.leader_selection.clone(), + registry_address: cfg.registry_address, } } @@ -93,6 +97,7 @@ impl GenesisSpec { } else { Some(attester::Committee::new(attesters).context("attesters")?)
}, + registry_address: x.registry_address, }) } } @@ -104,6 +109,7 @@ pub(super) fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<node::SecretKey>> pub(super) fn executor( cfg: &ConsensusConfig, secrets: &ConsensusSecrets, + build_version: Option<semver::Version>, ) -> anyhow::Result<executor::Config> { let mut gossip_static_outbound = HashMap::new(); { @@ -128,6 +134,7 @@ }; Ok(executor::Config { + build_version, server_addr: cfg.server_addr, public_addr: net::Host(cfg.public_addr.0.clone()), max_payload_size: cfg.max_payload_size, diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 259cac5d074a..a52393c0f488 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -6,15 +6,20 @@ use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; use zksync_dal::consensus_dal; -use zksync_node_sync::{ - fetcher::FetchedBlock, sync_action::ActionQueueSender, MainNodeClient, SyncState, -}; -use zksync_protobuf::ProtoFmt as _; +use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; -use zksync_web3_decl::client::{DynClient, L2}; +use zksync_web3_decl::{ + client::{DynClient, L2}, + error::is_retriable, + jsonrpsee::{core::ClientError, types::error::ErrorCode}, + namespaces::{EnNamespaceClient as _, EthNamespaceClient as _}, +}; use super::{config, storage::Store, ConsensusConfig, ConsensusSecrets}; -use crate::storage::{self, ConnectionPool}; +use crate::{ + registry, + storage::{self, ConnectionPool}, +}; /// External node. pub(super) struct EN { @@ -27,7 +32,7 @@ impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). /// - /// NOTE: Before starting the consensus node if fetches all the blocks + /// NOTE: Before starting the consensus node it fetches all the blocks /// older than consensus genesis from the main node using json RPC. pub async fn run( self, @@ -35,6 +40,7 @@ impl EN { actions: ActionQueueSender, cfg: ConsensusConfig, secrets: ConsensusSecrets, + build_version: Option<semver::Version>, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -47,13 +53,16 @@ impl EN { // Update sync state in the background. s.spawn_bg(self.fetch_state_loop(ctx)); - // Initialize genesis. - let genesis = self.fetch_genesis(ctx).await.wrap("fetch_genesis()")?; + // Initialize global config. + let global_config = self + .fetch_global_config(ctx) + .await + .wrap("fetch_global_config()")?; let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &genesis) + conn.try_update_global_config(ctx, &global_config) .await - .wrap("set_genesis()")?; + .wrap("try_update_global_config()")?; let mut payload_queue = conn .new_payload_queue(ctx, actions, self.sync_state.clone()) @@ -63,21 +72,25 @@ impl EN { drop(conn); // Fetch blocks before the genesis. - self.fetch_blocks(ctx, &mut payload_queue, Some(genesis.first_block)) - .await - .wrap("fetch_blocks()")?; + self.fetch_blocks( + ctx, + &mut payload_queue, + Some(global_config.genesis.first_block), + ) + .await + .wrap("fetch_blocks()")?; // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state.
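// The monitor below uses the usual `zksync_concurrency` scope pattern: a
// background task that never returns `Ok`, and whose `Err` tears the whole
// scope down. A minimal sketch of the shape (`fetch`, `initial` and `POLL`
// are placeholders):
//
//     s.spawn_bg::<()>(async {
//         loop {
//             if fetch().await? != initial {
//                 return Err(anyhow::format_err!("config changed").into());
//             }
//             ctx.sleep(POLL).await?;
//         }
//     });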
s.spawn_bg::<()>({ - let old = genesis.clone(); + let old = global_config.clone(); async { let old = old; loop { - if let Ok(new) = self.fetch_genesis(ctx).await { + if let Ok(new) = self.fetch_global_config(ctx).await { if new != old { return Err(anyhow::format_err!( - "genesis changed: old {old:?}, new {new:?}" + "global config changed: old {old:?}, new {new:?}" ) .into()); } @@ -105,10 +118,14 @@ impl EN { s.spawn_bg(async { Ok(runner.run(ctx).await?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(self.run_attestation_updater(ctx, genesis.clone(), attestation.clone())); + s.spawn_bg(self.run_attestation_controller( + ctx, + global_config.clone(), + attestation.clone(), + )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, build_version)?, block_store, batch_store, validator: config::validator_key(&secrets) @@ -164,24 +181,21 @@ impl EN { /// Monitors the `AttestationStatus` on the main node, /// and updates the attestation config accordingly. - async fn run_attestation_updater( + async fn run_attestation_controller( &self, ctx: &ctx::Ctx, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc, ) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); + let registry = registry::Registry::new(cfg.genesis.clone(), self.pool.clone()).await; let mut next = attester::BatchNumber(0); loop { let status = loop { match self.fetch_attestation_status(ctx).await { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { - if status.genesis != genesis.hash() { + if status.genesis != cfg.genesis.hash() { return Err(anyhow::format_err!("genesis mismatch").into()); } if status.next_batch_to_attest >= next { @@ -191,6 +205,7 @@ impl EN { } ctx.sleep(POLL_INTERVAL).await?; }; + next = status.next_batch_to_attest.next(); tracing::info!( "waiting for hash of batch {:?}", status.next_batch_to_attest @@ -199,6 +214,27 @@ impl EN { .pool .wait_for_batch_hash(ctx, status.next_batch_to_attest) .await?; + let Some(committee) = registry + .attester_committee_for( + ctx, + cfg.registry_address.map(registry::Address::new), + status.next_batch_to_attest, + ) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + self.pool + .connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; tracing::info!( "attesting batch {:?} with hash {hash:?}", status.next_batch_to_attest @@ -214,7 +250,6 @@ impl EN { })) .await .context("start_attestation()")?; - next = status.next_batch_to_attest.next(); } } @@ -224,37 +259,52 @@ impl EN { const DELAY_INTERVAL: time::Duration = time::Duration::milliseconds(500); const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); loop { - match ctx.wait(self.client.fetch_l2_block_number()).await? { + match ctx.wait(self.client.get_block_number()).await? 
{ Ok(head) => { + let head = L2BlockNumber(head.try_into().ok().context("overflow")?); self.sync_state.set_main_node_block(head); ctx.sleep(DELAY_INTERVAL).await?; } Err(err) => { - tracing::warn!("main_node_client.fetch_l2_block_number(): {err}"); + tracing::warn!("get_block_number(): {err}"); ctx.sleep(RETRY_INTERVAL).await?; } } } } - /// Fetches genesis from the main node. + /// Fetches consensus global configuration from the main node. #[tracing::instrument(skip_all)] - async fn fetch_genesis(&self, ctx: &ctx::Ctx) -> ctx::Result<validator::Genesis> { + async fn fetch_global_config( + &self, + ctx: &ctx::Ctx, + ) -> ctx::Result<consensus_dal::GlobalConfig> { + match ctx.wait(self.client.consensus_global_config()).await? { + Ok(cfg) => { + let cfg = cfg.context("main node is not running consensus component")?; + return Ok(zksync_protobuf::serde::deserialize(&cfg.0).context("deserialize()")?); + } + // For non-whitelisted methods, proxyd returns HTTP 403 with MethodNotFound in the body. + // Unfortunately, ClientError doesn't expose HTTP error codes. + Err(ClientError::Transport(_)) => {} + // For missing methods, the api server returns HTTP 200 with MethodNotFound in the body. + Err(ClientError::Call(err)) if err.code() == ErrorCode::MethodNotFound.code() => {} + Err(err) => { + return Err(err) + .context("consensus_global_config()") + .map_err(|err| err.into()); + } + } + tracing::info!("consensus_global_config() not found, calling consensus_genesis() instead"); let genesis = ctx - .wait(self.client.fetch_consensus_genesis()) + .wait(self.client.consensus_genesis()) .await? - .context("fetch_consensus_genesis()")? + .context("consensus_genesis()")? .context("main node is not running consensus component")?; - // Deserialize the json, but don't allow for unknown fields. - // We need to compute the hash of the Genesis, so simply ignoring the unknown fields won't - // do. - Ok(validator::GenesisRaw::read( - &zksync_protobuf::serde::deserialize_proto_with_options( - &genesis.0, /*deny_unknown_fields=*/ true, - ) - .context("deserialize")?, - )? - .with_hash()) + Ok(consensus_dal::GlobalConfig { + genesis: zksync_protobuf::serde::deserialize(&genesis.0).context("deserialize()")?, + registry_address: None, + }) } #[tracing::instrument(skip_all)] @@ -262,15 +312,12 @@ impl EN { &self, ctx: &ctx::Ctx, ) -> ctx::Result<consensus_dal::AttestationStatus> { - match ctx.wait(self.client.fetch_attestation_status()).await? { - Ok(Some(status)) => Ok(zksync_protobuf::serde::deserialize(&status.0) - .context("deserialize(AttestationStatus")?), - Ok(None) => Err(anyhow::format_err!("empty response").into()), - Err(err) => Err(anyhow::format_err!( - "AttestationStatus call to main node HTTP RPC failed: {err:#}" - ) - .into()), - } + let status = ctx + .wait(self.client.attestation_status()) + .await? + .context("attestation_status()")? + .context("main node is not running consensus component")?; + Ok(zksync_protobuf::serde::deserialize(&status.0).context("deserialize()")?) } /// Fetches (with retries) the given block from the main node. @@ -278,14 +325,11 @@ impl EN { const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); loop { - let res = ctx.wait(self.client.fetch_l2_block(n, true)).await?; - match res { + match ctx.wait(self.client.sync_l2_block(n, true)).await?
{ Ok(Some(block)) => return Ok(block.try_into()?), Ok(None) => {} - Err(err) if err.is_retriable() => {} - Err(err) => { - return Err(anyhow::format_err!("client.fetch_l2_block({}): {err}", n).into()); - } + Err(err) if is_retriable(&err) => {} + Err(err) => Err(err).with_context(|| format!("client.sync_l2_block({n})"))?, } ctx.sleep(RETRY_INTERVAL).await?; } diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 574e496f4d11..3150f839680e 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -45,6 +45,7 @@ pub async fn run_external_node( sync_state: SyncState, main_node_client: Box>, actions: ActionQueueSender, + build_version: semver::Version, ) -> anyhow::Result<()> { let en = en::EN { pool: ConnectionPool(pool), @@ -58,7 +59,8 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - en.run(ctx, actions, cfg, secrets).await + en.run(ctx, actions, cfg, secrets, Some(build_version)) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index 13d918b5b6ee..ff9cdf865281 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -5,6 +5,7 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +mod abi; // Currently `batch` module is only used in tests, // but will be used in production once batch syncing is implemented in consensus. #[allow(unused)] @@ -13,8 +14,10 @@ mod config; mod en; pub mod era; mod mn; +mod registry; mod storage; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] mod tests; +mod vm; diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 7de86b4d8ba1..4d428346ebe4 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -6,9 +6,10 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_dal::consensus_dal; use crate::{ - config, + config, registry, storage::{ConnectionPool, InsertCertificateError, Store}, }; @@ -36,9 +37,9 @@ pub async fn run_main_node( pool.connection(ctx) .await .wrap("connection()")? - .adjust_genesis(ctx, &spec) + .adjust_global_config(ctx, &spec) .await - .wrap("adjust_genesis()")?; + .wrap("adjust_global_config()")?; } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. @@ -47,33 +48,40 @@ pub async fn run_main_node( .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); - let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + let global_config = pool + .connection(ctx) .await - .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); - - let genesis = block_store.genesis().clone(); + .wrap("connection()")? + .global_config(ctx) + .await + .wrap("global_config()")? 
+ .context("global_config() disappeared")?; anyhow::ensure!( - genesis.leader_selection + global_config.genesis.leader_selection == validator::LeaderSelectionMode::Sticky(validator_key.public()), "unsupported leader selection mode - main node has to be the leader" ); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + .await + .wrap("BlockStore::new()")?; + s.spawn_bg(runner.run(ctx)); + let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) .await .wrap("BatchStore::new()")?; s.spawn_bg(runner.run(ctx)); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(run_attestation_updater( + s.spawn_bg(run_attestation_controller( ctx, &pool, - genesis, + global_config, attestation.clone(), )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, None)?, block_store, batch_store, validator: Some(executor::Validator { @@ -93,18 +101,17 @@ pub async fn run_main_node( /// Manages attestation state by configuring the /// next batch to attest and storing the collected /// certificates. -async fn run_attestation_updater( +async fn run_attestation_controller( ctx: &ctx::Ctx, pool: &ConnectionPool, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc, ) -> anyhow::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); + let registry = registry::Registry::new(cfg.genesis, pool.clone()).await; + let registry_addr = cfg.registry_address.map(registry::Address::new); + let mut next = attester::BatchNumber(0); let res = async { - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); loop { // After regenesis it might happen that the batch number for the first block // is not immediately known (the first block was not produced yet), @@ -118,10 +125,12 @@ async fn run_attestation_updater( .await .wrap("attestation_status()")? { - Some(status) => break status, - None => ctx.sleep(POLL_INTERVAL).await?, + Some(status) if status.next_batch_to_attest >= next => break status, + _ => {} } + ctx.sleep(POLL_INTERVAL).await?; }; + next = status.next_batch_to_attest.next(); tracing::info!( "waiting for hash of batch {:?}", status.next_batch_to_attest @@ -129,6 +138,22 @@ async fn run_attestation_updater( let hash = pool .wait_for_batch_hash(ctx, status.next_batch_to_attest) .await?; + let Some(committee) = registry + .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + pool.connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; tracing::info!( "attesting batch {:?} with hash {hash:?}", status.next_batch_to_attest @@ -140,7 +165,7 @@ async fn run_attestation_updater( number: status.next_batch_to_attest, genesis: status.genesis, }, - committee: committee.clone(), + committee, })) .await .context("start_attestation()")?; diff --git a/core/node/consensus/src/registry/abi.rs b/core/node/consensus/src/registry/abi.rs new file mode 100644 index 000000000000..55cc7f9264fb --- /dev/null +++ b/core/node/consensus/src/registry/abi.rs @@ -0,0 +1,225 @@ +//! Strongly-typed API for ConsensusRegistry contract. 
+#![allow(dead_code)]
+
+use std::sync::Arc;
+
+use anyhow::Context as _;
+use zksync_types::{ethabi, ethabi::Token};
+
+use crate::abi;
+
+/// Represents the ConsensusRegistry contract.
+#[derive(Debug, Clone)]
+pub(crate) struct ConsensusRegistry(Arc<ethabi::Contract>);
+
+impl AsRef<ethabi::Contract> for ConsensusRegistry {
+    fn as_ref(&self) -> &ethabi::Contract {
+        &self.0
+    }
+}
+
+impl ConsensusRegistry {
+    const FILE: &'static str = "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json";
+
+    /// Loads bytecode of the contract.
+    #[cfg(test)]
+    pub(crate) fn bytecode() -> Vec<u8> {
+        zksync_contracts::read_bytecode(Self::FILE)
+    }
+
+    /// Loads the `ethabi` representation of the contract.
+    pub(crate) fn load() -> Self {
+        Self(zksync_contracts::load_contract(ConsensusRegistry::FILE).into())
+    }
+
+    /// Constructs a call to function `F` of this contract.
+    pub(crate) fn call<F: abi::Function<Contract = Self>>(&self, inputs: F) -> abi::Call<F> {
+        abi::Call {
+            contract: self.clone(),
+            inputs,
+        }
+    }
+}
+
+/// ConsensusRegistry.getAttesterCommittee function.
+#[derive(Debug, Default)]
+pub(crate) struct GetAttesterCommittee;
+
+impl abi::Function for GetAttesterCommittee {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "getAttesterCommittee";
+
+    fn encode(&self) -> Vec<Token> {
+        vec![]
+    }
+
+    type Outputs = Vec<Attester>;
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<Self::Outputs> {
+        let [attesters] = tokens.try_into().ok().context("bad size")?;
+        let mut res = vec![];
+        for token in attesters.into_array().context("not array")? {
+            res.push(Attester::from_token(token).context("attesters")?);
+        }
+        Ok(res)
+    }
+}
+
+/// ConsensusRegistry.add function.
+#[derive(Debug, Default)]
+pub(crate) struct Add {
+    pub(crate) node_owner: ethabi::Address,
+    pub(crate) validator_weight: u32,
+    pub(crate) validator_pub_key: BLS12_381PublicKey,
+    pub(crate) validator_pop: BLS12_381Signature,
+    pub(crate) attester_weight: u32,
+    pub(crate) attester_pub_key: Secp256k1PublicKey,
+}
+
+impl abi::Function for Add {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "add";
+    fn encode(&self) -> Vec<Token> {
+        vec![
+            Token::Address(self.node_owner),
+            Token::Uint(self.validator_weight.into()),
+            self.validator_pub_key.to_token(),
+            self.validator_pop.to_token(),
+            Token::Uint(self.attester_weight.into()),
+            self.attester_pub_key.to_token(),
+        ]
+    }
+    type Outputs = ();
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> {
+        let [] = tokens.try_into().ok().context("bad size")?;
+        Ok(())
+    }
+}
+
+/// ConsensusRegistry.initialize function.
+#[derive(Debug, Default)]
+pub(crate) struct Initialize {
+    pub(crate) initial_owner: ethabi::Address,
+}
+
+impl abi::Function for Initialize {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "initialize";
+    fn encode(&self) -> Vec<Token> {
+        vec![Token::Address(self.initial_owner)]
+    }
+    type Outputs = ();
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> {
+        let [] = tokens.try_into().ok().context("bad size")?;
+        Ok(())
+    }
+}
+
+/// ConsensusRegistry.commitAttesterCommittee function.
+#[derive(Debug, Default)]
+pub(crate) struct CommitAttesterCommittee;
+
+impl abi::Function for CommitAttesterCommittee {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "commitAttesterCommittee";
+    fn encode(&self) -> Vec<Token> {
+        vec![]
+    }
+    type Outputs = ();
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> {
+        let [] = tokens.try_into().ok().context("bad size")?;
+        Ok(())
+    }
+}
+
+/// ConsensusRegistry.owner function.
+#[derive(Debug, Default)] +pub(crate) struct Owner; + +impl abi::Function for Owner { + type Contract = ConsensusRegistry; + const NAME: &'static str = "owner"; + fn encode(&self) -> Vec { + vec![] + } + type Outputs = ethabi::Address; + fn decode_outputs(tokens: Vec) -> anyhow::Result { + let [owner] = tokens.try_into().ok().context("bad size")?; + owner.into_address().context("not an address") + } +} + +// Auxiliary structs. + +/// Raw representation of a secp256k1 public key. +#[derive(Debug, Default)] +pub(crate) struct Secp256k1PublicKey { + pub(crate) tag: [u8; 1], + pub(crate) x: [u8; 32], +} + +impl Secp256k1PublicKey { + fn from_token(token: Token) -> anyhow::Result { + let [tag, x] = abi::into_tuple(token)?; + Ok(Self { + tag: abi::into_fixed_bytes(tag).context("tag")?, + x: abi::into_fixed_bytes(x).context("x")?, + }) + } + + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.tag.into()), + Token::FixedBytes(self.x.into()), + ]) + } +} + +/// Raw representation of an attester committee member. +#[derive(Debug)] +pub(crate) struct Attester { + pub(crate) weight: u32, + pub(crate) pub_key: Secp256k1PublicKey, +} + +impl Attester { + fn from_token(token: Token) -> anyhow::Result { + let [weight, pub_key] = abi::into_tuple(token)?; + Ok(Self { + weight: abi::into_uint(weight).context("weight")?, + pub_key: Secp256k1PublicKey::from_token(pub_key).context("pub_key")?, + }) + } +} + +/// Raw representation of a BLS12_381 public key. +#[derive(Debug, Default)] +pub(crate) struct BLS12_381PublicKey { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 32], + pub(crate) c: [u8; 32], +} + +impl BLS12_381PublicKey { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + Token::FixedBytes(self.c.into()), + ]) + } +} + +#[derive(Debug, Default)] +pub(crate) struct BLS12_381Signature { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 16], +} + +impl BLS12_381Signature { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + ]) + } +} diff --git a/core/node/consensus/src/registry/mod.rs b/core/node/consensus/src/registry/mod.rs new file mode 100644 index 000000000000..74da41309573 --- /dev/null +++ b/core/node/consensus/src/registry/mod.rs @@ -0,0 +1,80 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _}; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; + +use crate::{storage::ConnectionPool, vm::VM}; + +mod abi; +#[cfg(test)] +pub(crate) mod testonly; +#[cfg(test)] +mod tests; + +fn decode_attester_key(k: &abi::Secp256k1PublicKey) -> anyhow::Result { + let mut x = vec![]; + x.extend(k.tag); + x.extend(k.x); + ByteFmt::decode(&x) +} + +fn decode_weighted_attester(a: &abi::Attester) -> anyhow::Result { + Ok(attester::WeightedAttester { + weight: a.weight.into(), + key: decode_attester_key(&a.pub_key).context("key")?, + }) +} + +pub type Address = crate::abi::Address; + +#[derive(Debug)] +pub(crate) struct Registry { + contract: abi::ConsensusRegistry, + genesis: validator::Genesis, + vm: VM, +} + +impl Registry { + pub async fn new(genesis: validator::Genesis, pool: ConnectionPool) -> Self { + Self { + contract: abi::ConsensusRegistry::load(), + genesis, + vm: VM::new(pool).await, + } + } + + /// Attester committee for the given batch. + /// It reads committee from the contract. + /// Falls back to committee specified in the genesis. 
+ pub async fn attester_committee_for( + &self, + ctx: &ctx::Ctx, + address: Option<Address>
, + attested_batch: attester::BatchNumber, + ) -> ctx::Result> { + let Some(batch_defining_committee) = attested_batch.prev() else { + // Batch 0 doesn't need attestation. + return Ok(None); + }; + let Some(address) = address else { + return Ok(self.genesis.attesters.clone()); + }; + let raw = self + .vm + .call( + ctx, + batch_defining_committee, + address, + self.contract.call(abi::GetAttesterCommittee), + ) + .await + .wrap("vm.call()")?; + let mut attesters = vec![]; + for a in raw { + attesters.push(decode_weighted_attester(&a).context("decode_weighted_attester()")?); + } + Ok(Some( + attester::Committee::new(attesters.into_iter()).context("Committee::new()")?, + )) + } +} diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs new file mode 100644 index 000000000000..a0c55a557feb --- /dev/null +++ b/core/node/consensus/src/registry/testonly.rs @@ -0,0 +1,118 @@ +use rand::Rng; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; +use zksync_test_account::Account; +use zksync_types::{ethabi, Execute, Transaction, U256}; + +use super::*; + +pub(crate) fn make_tx( + account: &mut Account, + address: crate::abi::Address, + call: crate::abi::Call, +) -> Transaction { + account.get_l2_tx_for_execute( + Execute { + contract_address: *address, + calldata: call.calldata().unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ) +} + +pub(crate) struct WeightedValidator { + weight: validator::Weight, + key: validator::PublicKey, + pop: validator::ProofOfPossession, +} + +fn encode_attester_key(k: &attester::PublicKey) -> abi::Secp256k1PublicKey { + let b: [u8; 33] = ByteFmt::encode(k).try_into().unwrap(); + abi::Secp256k1PublicKey { + tag: b[0..1].try_into().unwrap(), + x: b[1..33].try_into().unwrap(), + } +} + +fn encode_validator_key(k: &validator::PublicKey) -> abi::BLS12_381PublicKey { + let b: [u8; 96] = ByteFmt::encode(k).try_into().unwrap(); + abi::BLS12_381PublicKey { + a: b[0..32].try_into().unwrap(), + b: b[32..64].try_into().unwrap(), + c: b[64..96].try_into().unwrap(), + } +} + +fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::BLS12_381Signature { + let b: [u8; 48] = ByteFmt::encode(pop).try_into().unwrap(); + abi::BLS12_381Signature { + a: b[0..32].try_into().unwrap(), + b: b[32..48].try_into().unwrap(), + } +} + +pub(crate) fn gen_validator(rng: &mut impl Rng) -> WeightedValidator { + let k: validator::SecretKey = rng.gen(); + WeightedValidator { + key: k.public(), + weight: rng.gen_range(1..100), + pop: k.sign_pop(), + } +} + +pub(crate) fn gen_attester(rng: &mut impl Rng) -> attester::WeightedAttester { + attester::WeightedAttester { + key: rng.gen(), + weight: rng.gen_range(1..100), + } +} + +impl Registry { + pub(crate) fn deploy(&self, account: &mut Account) -> (Address, Transaction) { + let tx = account.get_deploy_tx( + &abi::ConsensusRegistry::bytecode(), + None, + zksync_test_account::TxType::L2, + ); + (Address::new(tx.address), tx.tx) + } + + pub(crate) fn add( + &self, + node_owner: ethabi::Address, + validator: WeightedValidator, + attester: attester::WeightedAttester, + ) -> anyhow::Result> { + Ok(self.contract.call(abi::Add { + node_owner, + validator_pub_key: encode_validator_key(&validator.key), + validator_weight: validator + .weight + .try_into() + .context("overflow") + .context("validator_weight")?, + validator_pop: encode_validator_pop(&validator.pop), + attester_pub_key: encode_attester_key(&attester.key), + attester_weight: attester + 
.weight + .try_into() + .context("overflow") + .context("attester_weight")?, + })) + } + + pub(crate) fn initialize( + &self, + initial_owner: ethabi::Address, + ) -> crate::abi::Call { + self.contract.call(abi::Initialize { initial_owner }) + } + + pub(crate) fn commit_attester_committee( + &self, + ) -> crate::abi::Call { + self.contract.call(abi::CommitAttesterCommittee) + } +} diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs new file mode 100644 index 000000000000..935cd6738918 --- /dev/null +++ b/core/node/consensus/src/registry/tests.rs @@ -0,0 +1,91 @@ +use rand::Rng as _; +use zksync_concurrency::{ctx, scope}; +use zksync_consensus_roles::{attester, validator::testonly::Setup}; +use zksync_test_account::Account; +use zksync_types::ProtocolVersionId; + +use super::*; +use crate::storage::ConnectionPool; + +/// Test checking that parsing logic matches the abi specified in the json file. +#[test] +fn test_consensus_registry_abi() { + zksync_concurrency::testonly::abort_on_panic(); + let c = abi::ConsensusRegistry::load(); + c.call(abi::GetAttesterCommittee).test().unwrap(); + c.call(abi::Add::default()).test().unwrap(); + c.call(abi::Initialize::default()).test().unwrap(); + c.call(abi::CommitAttesterCommittee).test().unwrap(); + c.call(abi::Owner).test().unwrap(); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_attester_committee() { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 10); + let account = &mut Account::random(); + let to_fund = &[account.address]; + + scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::test(false, ProtocolVersionId::latest()).await; + let registry = Registry::new(setup.genesis.clone(), pool.clone()).await; + + // If the registry contract address is not specified, + // then the committee from genesis should be returned. + let got = registry + .attester_committee_for(ctx, None, attester::BatchNumber(10)) + .await + .unwrap(); + assert_eq!(setup.genesis.attesters, got); + + let (mut node, runner) = crate::testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run_real(ctx, to_fund)); + + // Deploy registry contract and initialize it. + let committee = + attester::Committee::new((0..5).map(|_| testonly::gen_attester(rng))).unwrap(); + let (registry_addr, tx) = registry.deploy(account); + let mut txs = vec![tx]; + let account_addr = account.address(); + txs.push(testonly::make_tx( + account, + registry_addr, + registry.initialize(account_addr), + )); + // Add attesters. + for a in committee.iter() { + txs.push(testonly::make_tx( + account, + registry_addr, + registry + .add(rng.gen(), testonly::gen_validator(rng), a.clone()) + .unwrap(), + )); + } + // Commit the update. + txs.push(testonly::make_tx( + account, + registry_addr, + registry.commit_attester_committee(), + )); + + node.push_block(&txs).await; + node.seal_batch().await; + pool.wait_for_batch(ctx, node.last_batch()).await?; + + // Read the attester committee using the vm. 
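Worth calling out before the assertion that follows: `attester_committee_for(b)` evaluates the contract at `b.prev()` (see `registry/mod.rs` above), so a committee committed in batch `n` first governs batch `n + 1`; that is why the test queries `batch + 1`. A tiny sanity sketch of that arithmetic, assuming only the `BatchNumber::prev()` semantics already relied on above:

// Sketch: the batch whose state defines the committee is the predecessor
// of the batch being attested.
assert_eq!(
    attester::BatchNumber(5).prev(),
    Some(attester::BatchNumber(4))
);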
+ let batch = attester::BatchNumber(node.last_batch().0.into()); + assert_eq!( + Some(committee), + registry + .attester_committee_for(ctx, Some(registry_addr), batch + 1) + .await + .unwrap() + ); + Ok(()) + }) + .await + .unwrap(); +} diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 6ff2fb1ce0a0..512b37e81a11 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -1,13 +1,14 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_crypto::keccak256::Keccak256; -use zksync_consensus_roles::{attester, validator}; +use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; use zksync_consensus_storage::{self as storage, BatchStoreState}; use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; +use zksync_node_api_server::execution_sandbox::{BlockArgs, BlockStartInfo}; use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState}; use zksync_state_keeper::io::common::IoCursor; -use zksync_types::{commitment::L1BatchWithMetadata, L1BatchNumber}; +use zksync_types::{api, commitment::L1BatchWithMetadata, L1BatchNumber}; use super::{InsertCertificateError, PayloadQueue}; use crate::config; @@ -18,7 +19,7 @@ pub(crate) struct ConnectionPool(pub(crate) zksync_dal::ConnectionPool); impl ConnectionPool { /// Wrapper for `connection_tagged()`. - pub(crate) async fn connection<'a>(&'a self, ctx: &ctx::Ctx) -> ctx::Result> { + pub(crate) async fn connection(&self, ctx: &ctx::Ctx) -> ctx::Result> { Ok(Connection( ctx.wait(self.0.connection_tagged("consensus")) .await? @@ -164,6 +165,22 @@ impl<'a> Connection<'a> { .map_err(E::Other)?) } + /// Wrapper for `consensus_dal().upsert_attester_committee()`. + pub async fn upsert_attester_committee( + &mut self, + ctx: &ctx::Ctx, + number: BatchNumber, + committee: &attester::Committee, + ) -> ctx::Result<()> { + ctx.wait( + self.0 + .consensus_dal() + .upsert_attester_committee(number, committee), + ) + .await??; + Ok(()) + } + /// Wrapper for `consensus_dal().replica_state()`. pub async fn replica_state(&mut self, ctx: &ctx::Ctx) -> ctx::Result { Ok(ctx @@ -229,22 +246,22 @@ impl<'a> Connection<'a> { }) } - /// Wrapper for `consensus_dal().genesis()`. - pub async fn genesis(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().genesis()) - .await? - .map_err(DalError::generalize)?) + /// Wrapper for `consensus_dal().global_config()`. + pub async fn global_config( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(ctx.wait(self.0.consensus_dal().global_config()).await??) } - /// Wrapper for `consensus_dal().try_update_genesis()`. - pub async fn try_update_genesis( + /// Wrapper for `consensus_dal().try_update_global_config()`. + pub async fn try_update_global_config( &mut self, ctx: &ctx::Ctx, - genesis: &validator::Genesis, + cfg: &consensus_dal::GlobalConfig, ) -> ctx::Result<()> { Ok(ctx - .wait(self.0.consensus_dal().try_update_genesis(genesis)) + .wait(self.0.consensus_dal().try_update_global_config(cfg)) .await??) } @@ -267,7 +284,7 @@ impl<'a> Connection<'a> { /// (Re)initializes consensus genesis to start at the last L2 block in storage. /// Noop if `spec` matches the current genesis. 
- pub(crate) async fn adjust_genesis( + pub(crate) async fn adjust_global_config( &mut self, ctx: &ctx::Ctx, spec: &config::GenesisSpec, @@ -277,31 +294,34 @@ impl<'a> Connection<'a> { .await .wrap("start_transaction()")?; - let old = txn.genesis(ctx).await.wrap("genesis()")?; + let old = txn.global_config(ctx).await.wrap("genesis()")?; if let Some(old) = &old { - if &config::GenesisSpec::from_genesis(old) == spec { + if &config::GenesisSpec::from_global_config(old) == spec { // Hard fork is not needed. return Ok(()); } } tracing::info!("Performing a hard fork of consensus."); - let genesis = validator::GenesisRaw { - chain_id: spec.chain_id, - fork_number: old - .as_ref() - .map_or(validator::ForkNumber(0), |old| old.fork_number.next()), - first_block: txn.next_block(ctx).await.context("next_block()")?, - protocol_version: spec.protocol_version, - validators: spec.validators.clone(), - attesters: spec.attesters.clone(), - leader_selection: spec.leader_selection.clone(), - } - .with_hash(); + let new = consensus_dal::GlobalConfig { + genesis: validator::GenesisRaw { + chain_id: spec.chain_id, + fork_number: old.as_ref().map_or(validator::ForkNumber(0), |old| { + old.genesis.fork_number.next() + }), + first_block: txn.next_block(ctx).await.context("next_block()")?, + protocol_version: spec.protocol_version, + validators: spec.validators.clone(), + attesters: spec.attesters.clone(), + leader_selection: spec.leader_selection.clone(), + } + .with_hash(), + registry_address: spec.registry_address, + }; - txn.try_update_genesis(ctx, &genesis) + txn.try_update_global_config(ctx, &new) .await - .wrap("try_update_genesis()")?; + .wrap("try_update_global_config()")?; txn.commit(ctx).await.wrap("commit()")?; Ok(()) } @@ -447,4 +467,29 @@ impl<'a> Connection<'a> { .await? .context("attestation_status()")?) } + + /// Constructs `BlockArgs` for the last block of the batch. + pub async fn vm_block_args( + &mut self, + ctx: &ctx::Ctx, + batch: attester::BatchNumber, + ) -> ctx::Result { + let (_, block) = self + .get_l2_block_range_of_l1_batch(ctx, batch) + .await + .wrap("get_l2_block_range_of_l1_batch()")? + .context("batch not sealed")?; + let block = api::BlockId::Number(api::BlockNumber::Number(block.0.into())); + let start_info = ctx + .wait(BlockStartInfo::new( + &mut self.0, + /*max_cache_age=*/ std::time::Duration::from_secs(10), + )) + .await? + .context("BlockStartInfo::new()")?; + Ok(ctx + .wait(BlockArgs::new(&mut self.0, block, &start_info)) + .await? + .context("BlockArgs::new")?) + } } diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 6a96812ae408..cb8e039d7d01 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -325,9 +325,10 @@ impl storage::PersistentBlockStore for Store { Ok(self .conn(ctx) .await? - .genesis(ctx) + .global_config(ctx) .await? - .context("not found")?) + .context("not found")? + .genesis) } fn persisted(&self) -> sync::watch::Receiver { diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 5d1279afbbfd..65c464d98b93 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -1,5 +1,4 @@ //! Storage test helpers. 
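The hunks above consistently swap the bare `validator::Genesis` for `consensus_dal::GlobalConfig`, which carries the genesis together with the optional on-chain registry address. For orientation, its shape as used throughout this diff (field types inferred from the call sites; the actual definition lives in `zksync_dal::consensus_dal` and is not part of this diff):

use zksync_consensus_roles::validator;
use zksync_types::ethabi;

pub struct GlobalConfig {
    pub genesis: validator::Genesis,
    /// Address of the ConsensusRegistry contract; `None` falls back to the
    /// attester committee baked into the genesis.
    pub registry_address: Option<ethabi::Address>,
}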
- use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_roles::{attester, validator}; @@ -13,6 +12,7 @@ use zksync_types::{ }; use super::{Connection, ConnectionPool}; +use crate::registry; impl Connection<'_> { /// Wrapper for `consensus_dal().batch_of_block()`. @@ -181,16 +181,16 @@ impl ConnectionPool { want_last: validator::BlockNumber, ) -> ctx::Result> { let blocks = self.wait_for_block_certificates(ctx, want_last).await?; - let genesis = self + let cfg = self .connection(ctx) .await .wrap("connection()")? - .genesis(ctx) + .global_config(ctx) .await .wrap("genesis()")? .context("genesis is missing")?; for block in &blocks { - block.verify(&genesis).context(block.number())?; + block.verify(&cfg.genesis).context(block.number())?; } Ok(blocks) } @@ -199,6 +199,7 @@ impl ConnectionPool { &self, ctx: &ctx::Ctx, want_last: attester::BatchNumber, + registry_addr: Option, ) -> ctx::Result<()> { // Wait for the last batch to be attested. const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); @@ -214,17 +215,17 @@ impl ConnectionPool { ctx.sleep(POLL_INTERVAL).await?; } let mut conn = self.connection(ctx).await.wrap("connection()")?; - let genesis = conn - .genesis(ctx) + let cfg = conn + .global_config(ctx) .await - .wrap("genesis()")? - .context("genesis is missing")?; + .wrap("global_config()")? + .context("global config is missing")?; let first = conn - .batch_of_block(ctx, genesis.first_block) + .batch_of_block(ctx, cfg.genesis.first_block) .await .wrap("batch_of_block()")? .context("batch of first_block is missing")?; - let committee = genesis.attesters.as_ref().unwrap(); + let registry = registry::Registry::new(cfg.genesis.clone(), self.clone()).await; for i in first.0..want_last.0 { let i = attester::BatchNumber(i); let hash = conn @@ -240,8 +241,13 @@ impl ConnectionPool { if cert.message.hash != hash { return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into()); } - cert.verify(genesis.hash(), committee) - .context("cert[{i:?}].verify()")?; + let committee = registry + .attester_committee_for(ctx, registry_addr, i) + .await + .context("attester_committee_for()")? 
+ .context("committee not specified")?; + cert.verify(cfg.genesis.hash(), &committee) + .with_context(|| format!("cert[{i:?}].verify()"))?; } Ok(()) } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 0537aaabc563..241998f26928 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -30,19 +30,21 @@ use zksync_node_sync::{ }; use zksync_node_test_utils::{create_l1_batch_metadata, l1_batch_metadata_to_commitment_artifacts}; use zksync_state_keeper::{ + executor::MainBatchExecutorFactory, io::{IoCursor, L1BatchParams, L2BlockParams}, seal_criteria::NoopSealer, testonly::{ fund, l1_transaction, l2_transaction, test_batch_executor::MockReadStorageFactory, MockBatchExecutor, }, - AsyncRocksdbCache, MainBatchExecutor, OutputHandler, StateKeeperPersistence, - TreeWritesPersistence, ZkSyncStateKeeper, + AsyncRocksdbCache, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, + ZkSyncStateKeeper, }; use zksync_test_account::Account; use zksync_types::{ + ethabi, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput}, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, + L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, Transaction, }; use zksync_web3_decl::client::{Client, DynClient, L2}; @@ -53,6 +55,7 @@ use crate::{ }; /// Fake StateKeeper for tests. +#[derive(Debug)] pub(super) struct StateKeeper { protocol_version: ProtocolVersionId, // Batch of the `last_block`. @@ -61,8 +64,6 @@ pub(super) struct StateKeeper { // timestamp of the last block. last_timestamp: u64, batch_sealed: bool, - // test L2 account - account: Account, next_priority_op: PriorityOpId, actions_sender: ActionQueueSender, @@ -115,6 +116,7 @@ pub(super) fn new_configs( }) .collect(), leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), + registry_address: None, }; network::testonly::new_configs(rng, setup, gossip_peers) .into_iter() @@ -182,7 +184,6 @@ pub(super) struct StateKeeperRunner { addr: sync::watch::Sender>, rocksdb_dir: tempfile::TempDir, metadata_calculator: MetadataCalculator, - account: Account, } impl StateKeeper { @@ -241,7 +242,6 @@ impl StateKeeper { .await .context("MetadataCalculator::new()")?; let tree_reader = metadata_calculator.tree_reader(); - let account = Account::random(); Ok(( Self { protocol_version, @@ -255,7 +255,6 @@ impl StateKeeper { addr: addr.subscribe(), pool: pool.clone(), tree_reader, - account: account.clone(), }, StateKeeperRunner { actions_queue, @@ -264,7 +263,6 @@ impl StateKeeper { addr, rocksdb_dir, metadata_calculator, - account, }, )) } @@ -305,22 +303,29 @@ impl StateKeeper { } } - /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`. - pub async fn push_random_block(&mut self, rng: &mut impl Rng) { + pub async fn push_block(&mut self, txs: &[Transaction]) { let mut actions = vec![self.open_block()]; - for _ in 0..rng.gen_range(3..8) { - let tx = match rng.gen() { - true => l2_transaction(&mut self.account, 1_000_000), + actions.extend( + txs.iter() + .map(|tx| FetchedTransaction::new(tx.clone()).into()), + ); + actions.push(SyncAction::SealL2Block); + self.actions_sender.push_actions(actions).await.unwrap(); + } + + /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`. 
+ pub async fn push_random_block(&mut self, rng: &mut impl Rng, account: &mut Account) { + let txs: Vec<_> = (0..rng.gen_range(3..8)) + .map(|_| match rng.gen() { + true => l2_transaction(account, 1_000_000), false => { - let tx = l1_transaction(&mut self.account, self.next_priority_op); + let tx = l1_transaction(account, self.next_priority_op); self.next_priority_op += 1; tx } - }; - actions.push(FetchedTransaction::new(tx).into()); - } - actions.push(SyncAction::SealL2Block); - self.actions_sender.push_actions(actions).await.unwrap(); + }) + .collect(); + self.push_block(&txs).await; } /// Pushes `SealBatch` command to the `StateKeeper`. @@ -333,14 +338,19 @@ impl StateKeeper { } /// Pushes `count` random L2 blocks to the StateKeeper. - pub async fn push_random_blocks(&mut self, rng: &mut impl Rng, count: usize) { + pub async fn push_random_blocks( + &mut self, + rng: &mut impl Rng, + account: &mut Account, + count: usize, + ) { for _ in 0..count { // 20% chance to seal an L1 batch. // `seal_batch()` also produces a (fictive) block. if rng.gen_range(0..100) < 20 { self.seal_batch().await; } else { - self.push_random_block(rng).await; + self.push_random_block(rng, account).await; } } } @@ -450,7 +460,13 @@ impl StateKeeper { client, sync_state: self.sync_state.clone(), } - .run(ctx, self.actions_sender, cfgs.config, cfgs.secrets) + .run( + ctx, + self.actions_sender, + cfgs.config, + cfgs.secrets, + cfgs.net.build_version, + ) .await } } @@ -533,14 +549,21 @@ async fn mock_metadata_calculator_step(ctx: &ctx::Ctx, pool: &ConnectionPool) -> impl StateKeeperRunner { // Executes the state keeper task with real metadata calculator task // and fake commitment generator (because real one is too slow). - pub async fn run_real(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + pub async fn run_real( + self, + ctx: &ctx::Ctx, + addrs_to_fund: &[ethabi::Address], + ) -> anyhow::Result<()> { let res = scope::run!(ctx, |ctx, s| async { - // Fund the test account. Required for L2 transactions to succeed. - fund(&self.pool.0, &[self.account.address]).await; + // Fund the test accounts. Required for L2 transactions to succeed. 
+ fund(&self.pool.0, addrs_to_fund).await; let (stop_send, stop_recv) = sync::watch::channel(false); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + self.pool.0.clone(), + ethabi::Address::repeat_byte(11), + 5, + ); let io = ExternalIO::new( self.pool.0.clone(), @@ -592,12 +615,13 @@ impl StateKeeperRunner { }); s.spawn_bg({ + let executor_factory = MainBatchExecutorFactory::new(false, false); let stop_recv = stop_recv.clone(); async { ZkSyncStateKeeper::new( stop_recv, Box::new(io), - Box::new(MainBatchExecutor::new(false, false)), + Box::new(executor_factory), OutputHandler::new(Box::new(persistence.with_tx_insertion())) .with_handler(Box::new(self.sync_state.clone())), Arc::new(NoopSealer), @@ -647,8 +671,11 @@ impl StateKeeperRunner { pub async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { let res = scope::run!(ctx, |ctx, s| async { let (stop_send, stop_recv) = sync::watch::channel(false); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + self.pool.0.clone(), + ethabi::Address::repeat_byte(11), + 5, + ); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); let io = ExternalIO::new( diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index b245d0524aa9..abd35508c7f7 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,17 +1,24 @@ use anyhow::Context as _; -use test_casing::{test_casing, Product}; +use rand::Rng as _; +use test_casing::test_casing; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ attester, validator::testonly::{Setup, SetupSpec}, }; -use zksync_dal::consensus_dal::AttestationStatus; -use zksync_node_sync::MainNodeClient; +use zksync_dal::consensus_dal; +use zksync_test_account::Account; use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::{FROM_SNAPSHOT, VERSIONS}; -use crate::{mn::run_main_node, storage::ConnectionPool, testonly}; +use super::VERSIONS; +use crate::{ + mn::run_main_node, + registry::{testonly, Registry}, + storage::ConnectionPool, + testonly::{new_configs, StateKeeper}, +}; #[test_casing(2, VERSIONS)] #[tokio::test] @@ -19,24 +26,31 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::test(false, version).await; - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + let (mut sk, runner) = StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); // Setup nontrivial genesis. 
while sk.last_sealed_batch() < L1BatchNumber(3) { - sk.push_random_blocks(rng, 10).await; + sk.push_random_blocks(rng, account, 10).await; } let mut setup = SetupSpec::new(rng, 3); setup.first_block = sk.last_block(); let first_batch = sk.last_batch(); let setup = Setup::from(setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &setup.genesis) - .await - .wrap("try_update_genesis()")?; + conn.try_update_global_config( + ctx, + &consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + }, + ) + .await + .wrap("try_update_global_config()")?; // Make sure that the first_batch is actually sealed. sk.seal_batch().await; pool.wait_for_batch(ctx, first_batch).await?; @@ -44,11 +58,11 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Connect to API endpoint. let api = sk.connect(ctx).await?; let fetch_status = || async { - let s = api - .fetch_attestation_status() - .await? + let s = ctx + .wait(api.attestation_status()) + .await?? .context("no attestation_status")?; - let s: AttestationStatus = + let s: consensus_dal::AttestationStatus = zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?; anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch"); Ok(s) @@ -62,24 +76,37 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { attester::BatchNumber(first_batch.0.into()) ); - // Insert a (fake) cert, then check again. + tracing::info!("Insert a cert"); { let mut conn = pool.connection(ctx).await?; let number = status.next_batch_to_attest; let hash = conn.batch_hash(ctx, number).await?.unwrap(); - let genesis = conn.genesis(ctx).await?.unwrap().hash(); + let gcfg = conn.global_config(ctx).await?.unwrap(); + let m = attester::Batch { + number, + hash, + genesis: gcfg.genesis.hash(), + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } let cert = attester::BatchQC { - signatures: attester::MultiSig::default(), - message: attester::Batch { - number, - hash, - genesis, - }, + signatures: sigs, + message: m, }; + conn.upsert_attester_committee( + ctx, + cert.message.number, + setup.genesis.attesters.as_ref().unwrap(), + ) + .await + .context("upsert_attester_committee")?; conn.insert_batch_certificate(ctx, &cert) .await .context("insert_batch_certificate()")?; } + tracing::info!("Check again."); let want = status.next_batch_to_attest.next(); let got = fetch_status().await?; assert_eq!(want, got.next_batch_to_attest); @@ -93,34 +120,65 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. -// -// TODO: it would be nice to use `StateKeeperRunner::run_real()` in this test, -// however as of now it doesn't work with ENs and it doesn't work with -// `ConnectionPool::from_snapshot`. 
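The rewritten `test_multiple_attesters` below drives the ConsensusRegistry contract through its full lifecycle. In sketch form, using the helpers defined in `registry/testonly.rs` (identifiers here are placeholders bound by the test setup):

// Sketch of the registry lifecycle driven by the test:
let (registry_addr, deploy_tx) = registry.deploy(account);   // 1. deploy the contract
let init = registry.initialize(account.address);             // 2. hand ownership to the test account
let add = registry.add(node_owner, validator, attester)?;    // 3. stage a committee member
let commit = registry.commit_attester_committee();           // 4. activate the staged committee

Each returned call is wrapped into an L2 transaction via `testonly::make_tx` and included in a block; per the lookup rule in `registry/mod.rs`, a committee committed in batch `n` takes effect from batch `n + 1`.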
-#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_multiple_attesters(version: ProtocolVersionId) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let cfgs = testonly::new_configs(rng, &setup, NODES); - + let mut cfgs = new_configs(rng, &setup, NODES); scope::run!(ctx, |ctx, s| async { - let validator_pool = ConnectionPool::test(from_snapshot, version).await; - let (mut validator, runner) = - testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + let validator_pool = ConnectionPool::test(false, version).await; + let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(async { runner - .run(ctx) + .run_real(ctx, to_fund) .instrument(tracing::info_span!("validator")) .await .context("validator") }); - // API server needs at least 1 L1 batch to start. + + tracing::info!("deploy registry with 1 attester"); + let attesters: Vec<_> = setup.genesis.attesters.as_ref().unwrap().iter().collect(); + let registry = Registry::new(setup.genesis.clone(), validator_pool.clone()).await; + let (registry_addr, tx) = registry.deploy(account); + cfgs[0] + .config + .genesis_spec + .as_mut() + .unwrap() + .registry_address = Some(*registry_addr); + let mut txs = vec![tx]; + txs.push(testonly::make_tx( + account, + registry_addr, + registry.initialize(account.address), + )); + txs.push(testonly::make_tx( + account, + registry_addr, + registry + .add( + rng.gen(), + testonly::gen_validator(rng), + attesters[0].clone(), + ) + .unwrap(), + )); + txs.push(testonly::make_tx( + account, + registry_addr, + registry.commit_attester_committee(), + )); + validator.push_block(&txs).await; validator.seal_batch().await; + + tracing::info!("wait for the batch to be processed before starting consensus"); validator_pool .wait_for_payload(ctx, validator.last_block()) .await?; @@ -137,13 +195,13 @@ async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId let mut node_pools = vec![]; for (i, cfg) in cfgs[1..].iter().enumerate() { let i = ctx::NoCopy(i); - let pool = ConnectionPool::test(from_snapshot, version).await; - let (node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + let pool = ConnectionPool::test(false, version).await; + let (node, runner) = StateKeeper::new(ctx, pool.clone()).await?; node_pools.push(pool.clone()); s.spawn_bg(async { let i = i; runner - .run(ctx) + .run_real(ctx, to_fund) .instrument(tracing::info_span!("node", i = *i)) .await .with_context(|| format!("node{}", *i)) @@ -151,13 +209,31 @@ async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg.clone())); } - tracing::info!("Create some batches"); - validator.push_random_blocks(rng, 20).await; - validator.seal_batch().await; + tracing::info!("add attesters one by one"); + #[allow(clippy::needless_range_loop)] + for i in 1..attesters.len() { + let txs = vec![ + testonly::make_tx( + account, + registry_addr, + registry + .add( + rng.gen(), + testonly::gen_validator(rng), + attesters[i].clone(), + ) + .unwrap(), + ), + testonly::make_tx(account, registry_addr, registry.commit_attester_committee()), + ]; + 
validator.push_block(&txs).await; + validator.seal_batch().await; + } + tracing::info!("Wait for the batches to be attested"); let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); validator_pool - .wait_for_batch_certificates_and_verify(ctx, want_last) + .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr)) .await?; Ok(()) }) diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs index 41d73fdb87c6..f0cae7f2c02e 100644 --- a/core/node/consensus/src/tests/batch.rs +++ b/core/node/consensus/src/tests/batch.rs @@ -1,6 +1,7 @@ use test_casing::{test_casing, Product}; use zksync_concurrency::{ctx, scope}; use zksync_consensus_roles::validator; +use zksync_test_account::Account; use zksync_types::{L1BatchNumber, ProtocolVersionId}; use super::{FROM_SNAPSHOT, VERSIONS}; @@ -13,6 +14,7 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let pool = ConnectionPool::test(from_snapshot, version).await; + let account = &mut Account::random(); // Fill storage with unsigned L2 blocks and L1 batches in a way that the // last L1 batch is guaranteed to have some L2 blocks executed in it. @@ -23,11 +25,11 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion for _ in 0..3 { for _ in 0..2 { - sk.push_random_block(rng).await; + sk.push_random_block(rng, account).await; } sk.seal_batch().await; } - sk.push_random_block(rng).await; + sk.push_random_block(rng, account).await; pool.wait_for_payload(ctx, sk.last_block()).await?; @@ -84,11 +86,13 @@ async fn test_batch_witness(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let to_fund = &[account.address]; scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::from_genesis(version).await; let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run_real(ctx)); + s.spawn_bg(runner.run_real(ctx, to_fund)); tracing::info!("analyzing storage"); { @@ -101,7 +105,7 @@ async fn test_batch_witness(version: ProtocolVersionId) { } // Seal a bunch of batches. - node.push_random_blocks(rng, 10).await; + node.push_random_blocks(rng, account, 10).await; node.seal_batch().await; pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; // We can verify only 2nd batch onward, because diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 0b611d55f06a..91f01f865a2b 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -7,6 +7,8 @@ use zksync_consensus_roles::{ validator::testonly::{Setup, SetupSpec}, }; use zksync_consensus_storage::BlockStore; +use zksync_dal::consensus_dal; +use zksync_test_account::Account; use zksync_types::ProtocolVersionId; use crate::{ @@ -28,6 +30,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let pool = ConnectionPool::test(false, version).await; + let account = &mut Account::random(); // Fill storage with unsigned L2 blocks. // Fetch a suffix of blocks that we will generate (fake) certs for. @@ -35,15 +38,21 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // Start state keeper. 
let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - sk.push_random_blocks(rng, 10).await; + sk.push_random_blocks(rng, account, 10).await; pool.wait_for_payload(ctx, sk.last_block()).await?; let mut setup = SetupSpec::new(rng, 3); setup.first_block = validator::BlockNumber(4); let mut setup = Setup::from(setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &setup.genesis) - .await - .wrap("try_update_genesis()")?; + conn.try_update_global_config( + ctx, + &consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + }, + ) + .await + .wrap("try_update_global_config()")?; for i in setup.genesis.first_block.0..sk.last_block().next().0 { let i = validator::BlockNumber(i); let payload = conn @@ -95,6 +104,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Start state keeper."); @@ -103,7 +113,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx)); tracing::info!("Populate storage with a bunch of blocks."); - sk.push_random_blocks(rng, 5).await; + sk.push_random_blocks(rng, account, 5).await; pool .wait_for_payload(ctx, sk.last_block()) .await @@ -118,7 +128,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { s.spawn_bg(run_main_node(ctx, cfg.config.clone(), cfg.secrets.clone(), pool.clone())); tracing::info!("Generate couple more blocks and wait for consensus to catch up."); - sk.push_random_blocks(rng, 3).await; + sk.push_random_blocks(rng, account, 3).await; pool .wait_for_block_certificate(ctx, sk.last_block()) .await @@ -126,7 +136,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Synchronously produce blocks one by one, and wait for consensus."); for _ in 0..2 { - sk.push_random_blocks(rng, 1).await; + sk.push_random_blocks(rng, account, 1).await; pool .wait_for_block_certificate(ctx, sk.last_block()) .await @@ -158,6 +168,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("spawn validator"); @@ -173,7 +184,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { )); tracing::info!("produce some batches"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -191,7 +202,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { }); tracing::info!("produce more batches"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -209,7 +220,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { }); tracing::info!("produce more blocks and compare storages"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; let want = validator_pool 
.wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; @@ -243,6 +254,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); // topology: // validator <-> node <-> node <-> ... @@ -264,7 +276,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { .context("validator") }); tracing::info!("Generate a couple of blocks, before initializing consensus genesis."); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; // API server needs at least 1 L1 batch to start. validator.seal_batch().await; validator_pool @@ -299,7 +311,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Make validator produce blocks and wait for fetchers to get them."); // Note that block from before and after genesis have to be fetched. - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); let want = validator_pool .wait_for_block_certificates_and_verify(ctx, want_last) @@ -328,6 +340,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); let cfgs = testonly::new_configs(rng, &setup, 1); + let account = &mut Account::random(); // Run all nodes in parallel. scope::run!(ctx, |ctx, s| async { @@ -342,7 +355,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { .context("main_node") }); tracing::info!("Generate a couple of blocks, before initializing consensus genesis."); - main_node.push_random_blocks(rng, 5).await; + main_node.push_random_blocks(rng, account, 5).await; // API server needs at least 1 L1 batch to start. 
main_node.seal_batch().await; main_node_pool @@ -381,7 +394,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { } tracing::info!("Make the main node produce blocks and wait for consensus to finalize them"); - main_node.push_random_blocks(rng, 5).await; + main_node.push_random_blocks(rng, account, 5).await; let want_last = main_node.last_block(); let want = main_node_pool .wait_for_block_certificates_and_verify(ctx, want_last) @@ -409,6 +422,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn validator."); @@ -433,7 +447,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) .await?; @@ -447,7 +461,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_fetcher(ctx, client.clone())); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; node_pool .wait_for_payload(ctx, validator.last_block()) .await?; @@ -461,7 +475,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; let want = validator_pool .wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; @@ -488,6 +502,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; @@ -535,7 +550,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { }); tracing::info!("Sync some blocks"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; let to_prune = validator.last_sealed_batch(); tracing::info!( @@ -546,7 +561,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { tracing::info!( "Seal another batch to make sure that there is at least 1 sealed batch after pruning." 
); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool .wait_for_batch(ctx, validator.last_sealed_batch()) @@ -565,7 +580,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { .prune_batches(ctx, to_prune) .await .context("prune_batches")?; - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; node_pool .wait_for_block_certificates(ctx, validator.last_block()) .await @@ -582,6 +597,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn a validator."); @@ -601,7 +617,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI s.spawn_bg(node.run_fetcher(ctx, validator.connect(ctx).await?)); tracing::info!("Produce some blocks and wait for node to fetch them"); - validator.push_random_blocks(rng, 10).await; + validator.push_random_blocks(rng, account, 10).await; let want = validator_pool .wait_for_payload(ctx, validator.last_block()) .await?; diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs new file mode 100644 index 000000000000..11b6b5c67e3b --- /dev/null +++ b/core/node/consensus/src/vm.rs @@ -0,0 +1,98 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_consensus_roles::attester; +use zksync_node_api_server::{ + execution_sandbox::{TransactionExecutor, TxSetupArgs, VmConcurrencyLimiter}, + tx_sender::MultiVMBaseSystemContracts, +}; +use zksync_state::PostgresStorageCaches; +use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; +use zksync_types::{ + ethabi, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256, +}; +use zksync_vm_interface::{ + ExecutionResult, OneshotTracingParams, TxExecutionArgs, TxExecutionMode, +}; + +use crate::{abi, storage::ConnectionPool}; + +/// VM executes eth_calls on the db. +#[derive(Debug)] +pub(crate) struct VM { + pool: ConnectionPool, + setup_args: TxSetupArgs, + limiter: VmConcurrencyLimiter, +} + +impl VM { + /// Constructs a new `VM` instance. 
+ pub async fn new(pool: ConnectionPool) -> Self { + Self { + pool, + setup_args: TxSetupArgs { + execution_mode: TxExecutionMode::EthCall, + operator_account: AccountTreeId::default(), + fee_input: BatchFeeInput::sensible_l1_pegged_default(), + base_system_contracts: scope::wait_blocking( + MultiVMBaseSystemContracts::load_eth_call_blocking, + ) + .await, + caches: PostgresStorageCaches::new(1, 1), + validation_computational_gas_limit: u32::MAX, + chain_id: L2ChainId::default(), + whitelisted_tokens_for_aa: vec![], + enforced_base_fee: None, + }, + limiter: VmConcurrencyLimiter::new(1).0, + } + } + + // FIXME (PLA-1018): switch to oneshot executor + pub async fn call( + &self, + ctx: &ctx::Ctx, + batch: attester::BatchNumber, + address: abi::Address, + call: abi::Call, + ) -> ctx::Result { + let tx = L2Tx::new( + *address, + call.calldata().context("call.calldata()")?, + Nonce(0), + Fee { + gas_limit: U256::from(2000000000u32), + max_fee_per_gas: U256::zero(), + max_priority_fee_per_gas: U256::zero(), + gas_per_pubdata_limit: U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE), + }, + ethabi::Address::zero(), + U256::zero(), + vec![], + Default::default(), + ); + let permit = ctx.wait(self.limiter.acquire()).await?.unwrap(); + let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; + let args = conn + .vm_block_args(ctx, batch) + .await + .wrap("vm_block_args()")?; + let output = ctx + .wait(TransactionExecutor::real(usize::MAX).execute_tx_in_sandbox( + permit, + self.setup_args.clone(), + TxExecutionArgs::for_eth_call(tx.clone()), + conn.0, + args, + None, + OneshotTracingParams::default(), + )) + .await? + .context("execute_tx_in_sandbox()")?; + match output.vm.result { + ExecutionResult::Success { output } => { + Ok(call.decode_outputs(&output).context("decode_output()")?) 
+ } + other => Err(anyhow::format_err!("unsuccessful execution: {other:?}").into()), + } + } +} diff --git a/core/node/consistency_checker/Cargo.toml b/core/node/consistency_checker/Cargo.toml index 769690b493a4..ed2cbd5bbd79 100644 --- a/core/node/consistency_checker/Cargo.toml +++ b/core/node/consistency_checker/Cargo.toml @@ -14,7 +14,6 @@ categories.workspace = true zksync_contracts.workspace = true zksync_dal.workspace = true zksync_eth_client.workspace = true -zksync_eth_sender.workspace = true zksync_health_check.workspace = true zksync_l1_contract_interface.workspace = true zksync_shared_metrics.workspace = true diff --git a/core/lib/default_da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml similarity index 51% rename from core/lib/default_da_clients/Cargo.toml rename to core/node/da_clients/Cargo.toml index 737d209aed31..60b65067f48d 100644 --- a/core/lib/default_da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "zksync_default_da_clients" -description = "ZKsync DA client implementations" +name = "zksync_da_clients" +description = "ZKsync data availability clients" version.workspace = true edition.workspace = true authors.workspace = true @@ -16,10 +16,24 @@ tracing.workspace = true async-trait.workspace = true anyhow.workspace = true flate2.workspace = true +tokio.workspace = true zksync_config.workspace = true zksync_types.workspace = true zksync_object_store.workspace = true zksync_da_client.workspace = true -zksync_node_framework.workspace = true zksync_env_config.workspace = true +futures.workspace = true + +# Avail dependencies +scale-encode.workspace = true +subxt-metadata.workspace = true +blake2.workspace = true +base58.workspace = true +serde_json.workspace = true +hex.workspace = true +blake2b_simd.workspace = true + +jsonrpsee = { workspace = true, features = ["ws-client"] } +parity-scale-codec = { workspace = true, features = ["derive"] } +subxt-signer = { workspace = true, features = ["sr25519", "native"] } diff --git a/core/node/da_clients/README.md b/core/node/da_clients/README.md new file mode 100644 index 000000000000..df06cef24197 --- /dev/null +++ b/core/node/da_clients/README.md @@ -0,0 +1,10 @@ +# Data Availability Clients + +This crate contains the implementations of the Data Availability clients. + +Currently, the following DataAvailability clients are implemented: + +- `NoDA client` that does not send or store any pubdata, it is needed to run the zkSync network in the "no-DA" mode + utilizing the DA framework. +- `Object Store client` that stores the pubdata in the Object Store(GCS). +- `Avail` that sends the pubdata to the Avail DA layer. diff --git a/core/node/da_clients/src/avail/client.rs b/core/node/da_clients/src/avail/client.rs new file mode 100644 index 000000000000..021906d73a01 --- /dev/null +++ b/core/node/da_clients/src/avail/client.rs @@ -0,0 +1,85 @@ +use std::{fmt::Debug, sync::Arc}; + +use async_trait::async_trait; +use jsonrpsee::ws_client::WsClientBuilder; +use zksync_config::AvailConfig; +use zksync_da_client::{ + types::{DAError, DispatchResponse, InclusionData}, + DataAvailabilityClient, +}; + +use crate::avail::sdk::RawAvailClient; + +/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. 
+
+/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network.
+#[derive(Debug, Clone)]
+pub struct AvailClient {
+    config: AvailConfig,
+    sdk_client: Arc<RawAvailClient>,
+}
+
+impl AvailClient {
+    pub async fn new(config: AvailConfig) -> anyhow::Result<Self> {
+        let sdk_client = RawAvailClient::new(config.app_id, config.seed.clone()).await?;
+
+        Ok(Self {
+            config,
+            sdk_client: Arc::new(sdk_client),
+        })
+    }
+}
+
+#[async_trait]
+impl DataAvailabilityClient for AvailClient {
+    async fn dispatch_blob(
+        &self,
+        _: u32, // batch_number
+        data: Vec<u8>,
+    ) -> anyhow::Result<DispatchResponse, DAError> {
+        let client = WsClientBuilder::default()
+            .build(self.config.api_node_url.as_str())
+            .await
+            .map_err(to_non_retriable_da_error)?;
+
+        let extrinsic = self
+            .sdk_client
+            .build_extrinsic(&client, data)
+            .await
+            .map_err(to_non_retriable_da_error)?;
+
+        let block_hash = self
+            .sdk_client
+            .submit_extrinsic(&client, extrinsic.as_str())
+            .await
+            .map_err(to_non_retriable_da_error)?;
+        let tx_id = self
+            .sdk_client
+            .get_tx_id(&client, block_hash.as_str(), extrinsic.as_str())
+            .await
+            .map_err(to_non_retriable_da_error)?;
+
+        Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id)))
+    }
+
+    async fn get_inclusion_data(
+        &self,
+        _blob_id: &str,
+    ) -> anyhow::Result<Option<InclusionData>, DAError> {
+        // TODO: implement inclusion data retrieval
+        Ok(Some(InclusionData { data: vec![] }))
+    }
+
+    fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> {
+        Box::new(self.clone())
+    }
+
+    fn blob_size_limit(&self) -> Option<usize> {
+        Some(RawAvailClient::MAX_BLOB_SIZE)
+    }
+}
+
+pub fn to_non_retriable_da_error(error: impl Into<anyhow::Error>) -> DAError {
+    DAError {
+        error: error.into(),
+        is_retriable: false,
+    }
+}
diff --git a/core/node/da_clients/src/avail/mod.rs b/core/node/da_clients/src/avail/mod.rs
new file mode 100644
index 000000000000..82073448ba15
--- /dev/null
+++ b/core/node/da_clients/src/avail/mod.rs
@@ -0,0 +1,4 @@
+mod client;
+mod sdk;
+
+pub use self::client::AvailClient;
diff --git a/core/node/da_clients/src/avail/sdk.rs b/core/node/da_clients/src/avail/sdk.rs
new file mode 100644
index 000000000000..5e67540fcc69
--- /dev/null
+++ b/core/node/da_clients/src/avail/sdk.rs
@@ -0,0 +1,371 @@
+//! Minimal reimplementation of the Avail SDK client required for the DA client implementation.
+//! This is considered to be a temporary solution until a mature SDK is available on crates.io.
+
+use std::fmt::Debug;
+
+use jsonrpsee::{
+    core::client::{Client, ClientT, Subscription, SubscriptionClientT},
+    rpc_params,
+};
+use parity_scale_codec::{Compact, Decode, Encode};
+use scale_encode::EncodeAsFields;
+use subxt_signer::{
+    bip39::Mnemonic,
+    sr25519::{Keypair, Signature},
+};
+
+use crate::avail::client::to_non_retriable_da_error;
+
+const PROTOCOL_VERSION: u8 = 4;
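
Editorial note: the SDK below leans heavily on SCALE encoding via `parity_scale_codec`. As a quick refresher (standalone sketch, assuming the `parity-scale-codec` dependency declared in the Cargo.toml above): compact-encoded integers in the 0..=63 range occupy a single byte equal to `value << 2`, while fixed-width integers encode as little-endian bytes.

```rust
use parity_scale_codec::{Compact, Encode};

fn main() {
    // Single-byte mode: values 0..=63 encode as `value << 2`.
    assert_eq!(Compact(0u32).encode(), vec![0x00]);
    assert_eq!(Compact(1u32).encode(), vec![0x04]);
    assert_eq!(Compact(42u32).encode(), vec![0xa8]);
    // Fixed-width integers are just little-endian bytes.
    assert_eq!(42u32.encode(), vec![42, 0, 0, 0]);
}
```
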
+
+/// A raw client for the Avail network that can build, sign, and submit `submit_data` extrinsics.
+#[derive(Debug, Clone)]
+pub(crate) struct RawAvailClient {
+    app_id: u32,
+    keypair: Keypair,
+}
+
+/// Utility type needed for encoding the call data
+#[derive(parity_scale_codec::Encode, scale_encode::EncodeAsType)]
+#[encode_as_type(crate_path = "scale_encode")]
+struct SubmitData {
+    pub data: BoundedVec<u8>,
+}
+
+/// Utility type needed for encoding the call data
+#[derive(parity_scale_codec::Encode, scale_encode::EncodeAsType)]
+#[encode_as_type(crate_path = "scale_encode")]
+struct BoundedVec<_0>(pub Vec<_0>);
+
+impl RawAvailClient {
+    pub(crate) const MAX_BLOB_SIZE: usize = 512 * 1024; // 512 KiB
+
+    pub(crate) async fn new(app_id: u32, seed: String) -> anyhow::Result<Self> {
+        let mnemonic = Mnemonic::parse(seed)?;
+        let keypair = Keypair::from_phrase(&mnemonic, None)?;
+
+        Ok(Self { app_id, keypair })
+    }
+
+    /// Returns a hex-encoded extrinsic
+    pub(crate) async fn build_extrinsic(
+        &self,
+        client: &Client,
+        data: Vec<u8>,
+    ) -> anyhow::Result<String> {
+        let call_data = self
+            .get_encoded_call(client, data)
+            .await
+            .map_err(to_non_retriable_da_error)?;
+        let extra_params = self
+            .get_extended_params(client)
+            .await
+            .map_err(to_non_retriable_da_error)?;
+        let additional_params = self
+            .get_additional_params(client)
+            .await
+            .map_err(to_non_retriable_da_error)?;
+
+        let signature = self.get_signature(
+            call_data.as_slice(),
+            extra_params.as_slice(),
+            additional_params.as_slice(),
+        );
+
+        let ext = self.get_submittable_extrinsic(
+            signature,
+            extra_params.as_slice(),
+            call_data.as_slice(),
+        );
+
+        Ok(hex::encode(&ext))
+    }
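
Editorial note: `build_extrinsic` above assembles four ingredients: the encoded call, the "extra" signed-extension params, the "additional" params, and a signature over their concatenation. `get_encoded_call` below produces the first ingredient as `pallet_index ++ call_index ++ SCALE(fields)`. A standalone sketch of that layout; the pallet and call indices are made up here, on a real chain they come from the runtime metadata:

```rust
use parity_scale_codec::Encode;

fn main() {
    let pallet_index: u8 = 29; // hypothetical index of the DataAvailability pallet
    let call_index: u8 = 1; // hypothetical index of `submit_data`
    let data = b"hello".to_vec();

    let mut call = vec![pallet_index, call_index];
    // `BoundedVec<u8>` encodes like `Vec<u8>`: compact length prefix + raw bytes.
    data.encode_to(&mut call);

    // 5 bytes of payload -> compact length prefix 5 << 2 = 20.
    assert_eq!(call, [29, 1, 20, b'h', b'e', b'l', b'l', b'o']);
}
```
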
+
+    /// Returns an encoded call data
+    async fn get_encoded_call(
+        &self,
+        client: &Client,
+        data: Vec<u8>,
+    ) -> anyhow::Result<Vec<u8>, anyhow::Error> {
+        let resp: serde_json::Value = client.request("state_getMetadata", rpc_params![]).await?;
+
+        let resp = resp
+            .as_str()
+            .ok_or_else(|| anyhow::anyhow!("Invalid metadata"))?
+            .to_string();
+
+        let metadata_bytes = hex::decode(
+            resp.strip_prefix("0x")
+                .ok_or_else(|| anyhow::anyhow!("Metadata doesn't have 0x prefix"))?,
+        )?;
+        let meta = subxt_metadata::Metadata::decode(&mut &metadata_bytes[..])?;
+
+        let pallet = meta
+            .pallet_by_name("DataAvailability")
+            .ok_or_else(|| anyhow::anyhow!("DataAvailability pallet not found"))?;
+
+        let call = pallet
+            .call_variant_by_name("submit_data")
+            .ok_or_else(|| anyhow::anyhow!("submit_data call not found"))?;
+
+        let mut fields = call
+            .fields
+            .iter()
+            .map(|f| scale_encode::Field::new(f.ty.id, f.name.as_deref()));
+
+        let mut bytes = Vec::new();
+        pallet.index().encode_to(&mut bytes);
+        call.index.encode_to(&mut bytes);
+
+        SubmitData {
+            data: BoundedVec(data),
+        }
+        .encode_as_fields_to(&mut fields, meta.types(), &mut bytes)?;
+
+        Ok(bytes)
+    }
+
+    /// Queries a node for a nonce
+    async fn fetch_account_nonce(&self, client: &Client) -> anyhow::Result<u64> {
+        let address = to_addr(self.keypair.clone());
+        let resp: serde_json::Value = client
+            .request("system_accountNextIndex", rpc_params![address])
+            .await?;
+
+        let nonce = resp
+            .as_u64()
+            .ok_or_else(|| anyhow::anyhow!("Invalid nonce"))?;
+
+        Ok(nonce)
+    }
+
+    /// Returns the Compact-encoded extended extrinsic parameters.
+    /// Extrinsic params used here:
+    /// - CheckMortality
+    /// - CheckNonce
+    /// - ChargeTransactionPayment
+    /// - CheckAppId
+    async fn get_extended_params(&self, client: &Client) -> anyhow::Result<Vec<u8>> {
+        let era = 0u8; // immortal era
+        let tip = 0u128; // no tip
+        let nonce = self.fetch_account_nonce(client).await?;
+
+        // Encode the params
+        let mut bytes = vec![era];
+        Compact(nonce).encode_to(&mut bytes);
+        Compact(tip).encode_to(&mut bytes);
+        Compact(self.app_id).encode_to(&mut bytes);
+
+        Ok(bytes)
+    }
+
+    /// Returns the encoded additional extrinsic parameters.
+    /// Extrinsic params used here:
+    /// - CheckSpecVersion
+    /// - CheckTxVersion
+    /// - CheckGenesis
+    async fn get_additional_params(&self, client: &Client) -> anyhow::Result<Vec<u8>> {
+        let (spec_version, tx_version) = self.get_runtime_version(client).await?;
+        let genesis_hash = self.fetch_genesis_hash(client).await?;
+
+        let mut bytes = Vec::new();
+        spec_version.encode_to(&mut bytes);
+        tx_version.encode_to(&mut bytes);
+        // adding genesis hash twice (that's what the API requires ¯\_(ツ)_/¯)
+        bytes.extend(hex::decode(&genesis_hash)?);
+        bytes.extend(hex::decode(&genesis_hash)?);
+
+        Ok(bytes)
+    }
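
Editorial note: for a fresh account (nonce 0), no tip, and app ID 1, the extended params above come out to just four bytes; a check of that claim (standalone sketch, not part of the diff):

```rust
use parity_scale_codec::{Compact, Encode};

fn main() {
    // Mirrors `get_extended_params` above: era byte, then compact nonce, tip, app_id.
    let mut bytes = vec![0u8]; // immortal era
    Compact(0u64).encode_to(&mut bytes); // nonce 0
    Compact(0u128).encode_to(&mut bytes); // no tip
    Compact(1u32).encode_to(&mut bytes); // app_id 1

    assert_eq!(bytes, [0x00, 0x00, 0x00, 0x04]);
}
```
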
+
+    /// Returns the specification and transaction versions of a runtime
+    async fn get_runtime_version(&self, client: &Client) -> anyhow::Result<(u32, u32)> {
+        let resp: serde_json::Value = client
+            .request("chain_getRuntimeVersion", rpc_params![])
+            .await?;
+
+        let sv = resp
+            .get("specVersion")
+            .ok_or_else(|| anyhow::anyhow!("Invalid runtime version"))?;
+        let tv = resp
+            .get("transactionVersion")
+            .ok_or_else(|| anyhow::anyhow!("Invalid runtime version"))?;
+
+        let spec_version = sv
+            .as_u64()
+            .ok_or_else(|| anyhow::anyhow!("Invalid spec version"))?;
+        let transaction_version = tv
+            .as_u64()
+            .ok_or_else(|| anyhow::anyhow!("Invalid transaction version"))?;
+
+        Ok((spec_version as u32, transaction_version as u32))
+    }
+
+    async fn fetch_genesis_hash(&self, client: &Client) -> anyhow::Result<String> {
+        let resp: serde_json::Value = client.request("chain_getBlockHash", rpc_params![0]).await?;
+
+        let genesis_hash = resp
+            .as_str()
+            .ok_or_else(|| anyhow::anyhow!("Invalid genesis hash"))?;
+
+        Ok(genesis_hash
+            .strip_prefix("0x")
+            .ok_or_else(|| anyhow::anyhow!("Genesis hash doesn't have a 0x prefix"))?
+            .to_string())
+    }
+
+    /// Returns a signature for a partially-encoded extrinsic
+    fn get_signature(
+        &self,
+        call_data: &[u8],
+        extra_params: &[u8],
+        additional_params: &[u8],
+    ) -> Signature {
+        let mut bytes = vec![];
+        bytes.extend_from_slice(call_data);
+        bytes.extend_from_slice(extra_params);
+        bytes.extend_from_slice(additional_params);
+
+        if bytes.len() > 256 {
+            bytes = blake2::<32>(bytes).to_vec();
+        }
+
+        self.keypair.sign(&bytes)
+    }
+
+    /// Encodes all the components of an extrinsic into a single vector
+    fn get_submittable_extrinsic(
+        &self,
+        signature: Signature,
+        extra_params: &[u8],
+        call_data: &[u8],
+    ) -> Vec<u8> {
+        let mut encoded_inner = Vec::new();
+        (0b10000000 + PROTOCOL_VERSION).encode_to(&mut encoded_inner); // "is signed" + transaction protocol version
+
+        // sender
+        encoded_inner.push(0); // 0 as an id param in MultiAddress enum
+        self.keypair.public_key().0.encode_to(&mut encoded_inner); // from address for signature
+
+        // signature
+        encoded_inner.push(1); // 1 as an Sr25519 in MultiSignature enum
+        signature.0.encode_to(&mut encoded_inner);
+
+        // extra params
+        encoded_inner.extend_from_slice(extra_params);
+
+        // call data
+        encoded_inner.extend_from_slice(call_data);
+
+        // now, prefix with byte length:
+        let len = Compact(
+            u32::try_from(encoded_inner.len()).expect("extrinsic size expected to be <4GB"),
+        );
+        let mut encoded = Vec::new();
+        len.encode_to(&mut encoded);
+        encoded.extend(encoded_inner);
+
+        encoded
+    }
+
+    /// Submits an extrinsic. Subscribes to a stream and waits for the tx to be included in a block
+    /// in order to return the block hash.
+    pub(crate) async fn submit_extrinsic(
+        &self,
+        client: &Client,
+        extrinsic: &str,
+    ) -> anyhow::Result<String> {
+        let mut sub: Subscription<serde_json::Value> = client
+            .subscribe(
+                "author_submitAndWatchExtrinsic",
+                rpc_params![extrinsic],
+                "author_unwatchExtrinsic",
+            )
+            .await?;
+
+        let block_hash = loop {
+            let status = sub.next().await.transpose()?;
+
+            if status.is_some() && status.as_ref().unwrap().is_object() {
+                if let Some(block_hash) = status.unwrap().get("inBlock") {
+                    break block_hash
+                        .as_str()
+                        .ok_or_else(|| anyhow::anyhow!("Invalid block hash"))?
+                        .strip_prefix("0x")
+                        .ok_or_else(|| anyhow::anyhow!("Block hash doesn't have 0x prefix"))?
+                        .to_string();
+                }
+            }
+        };
+        sub.unsubscribe().await?;
+
+        Ok(block_hash)
+    }
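
Editorial note: the envelope produced by `get_submittable_extrinsic` above starts with a version byte whose high bit flags a signed extrinsic; with `PROTOCOL_VERSION = 4` that byte is `0x84`. A standalone check of the byte layout described by the comments above:

```rust
fn main() {
    const PROTOCOL_VERSION: u8 = 4;
    // High bit set = "signed", low bits = transaction protocol version.
    let version_byte = 0b1000_0000 + PROTOCOL_VERSION;
    assert_eq!(version_byte, 0x84);

    // The rest of the envelope, before the outer compact length prefix:
    // 0x00          MultiAddress::Id discriminant
    // [u8; 32]      sr25519 public key
    // 0x01          MultiSignature::Sr25519 discriminant
    // [u8; 64]      signature
    // extra params  era / nonce / tip / app_id
    // call data     pallet index / call index / SCALE(fields)
}
```
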
+
+    /// Iterates over all transactions in the block and finds the ID of the one provided as an argument
+    pub(crate) async fn get_tx_id(
+        &self,
+        client: &Client,
+        block_hash: &str,
+        hex_ext: &str,
+    ) -> anyhow::Result<usize> {
+        let resp: serde_json::Value = client
+            .request("chain_getBlock", rpc_params![block_hash])
+            .await?;
+
+        let block = resp
+            .get("block")
+            .ok_or_else(|| anyhow::anyhow!("Invalid block"))?;
+        let extrinsics = block
+            .get("extrinsics")
+            .ok_or_else(|| anyhow::anyhow!("No field named extrinsics in block"))?
+            .as_array()
+            .ok_or_else(|| anyhow::anyhow!("Extrinsics field is not an array"))?;
+
+        let hex_ext = format!("0x{}", hex_ext);
+
+        let tx_id = extrinsics
+            .iter()
+            .position(|extrinsic| extrinsic.as_str() == Some(hex_ext.as_str()))
+            .ok_or_else(|| anyhow::anyhow!("Extrinsic not found in block"))?;
+
+        Ok(tx_id)
+    }
+}
+
+fn blake2<const N: usize>(data: Vec<u8>) -> [u8; N] {
+    blake2b_simd::Params::new()
+        .hash_length(N)
+        .hash(data.as_slice())
+        .as_bytes()
+        .try_into()
+        .expect("slice is always the necessary length")
+}
+
+// Taken from the subxt accountId implementation
+fn to_addr(keypair: Keypair) -> String {
+    // For serializing to a string to obtain the account nonce, we use the default substrate
+    // prefix (since we have no way to otherwise pick one). It doesn't really matter, since when
+    // it's deserialized back in system_accountNextIndex, we ignore this (so long as it's valid).
+    const SUBSTRATE_SS58_PREFIX: u8 = 42;
+    // prefixes <= 63 just take up one byte at the start:
+    let mut v = vec![SUBSTRATE_SS58_PREFIX];
+    // then push the account ID bytes.
+    v.extend(keypair.public_key().0);
+    // then push a 2 byte checksum of what we have so far.
+    let r = ss58hash(&v);
+    v.extend(&r[0..2]);
+    // then encode to base58.
+    use base58::ToBase58;
+    v.to_base58()
+}
+
+// Taken from the subxt accountId implementation
+fn ss58hash(data: &[u8]) -> Vec<u8> {
+    use blake2::{Blake2b512, Digest};
+    const PREFIX: &[u8] = b"SS58PRE";
+    let mut ctx = Blake2b512::new();
+    ctx.update(PREFIX);
+    ctx.update(data);
+    ctx.finalize().to_vec()
+}
diff --git a/core/lib/default_da_clients/src/lib.rs b/core/node/da_clients/src/lib.rs
similarity index 71%
rename from core/lib/default_da_clients/src/lib.rs
rename to core/node/da_clients/src/lib.rs
index 3aa2a18cdcec..48311ce4c3f2 100644
--- a/core/lib/default_da_clients/src/lib.rs
+++ b/core/node/da_clients/src/lib.rs
@@ -1,2 +1,3 @@
+pub mod avail;
 pub mod no_da;
 pub mod object_store;
diff --git a/core/lib/default_da_clients/src/no_da/client.rs b/core/node/da_clients/src/no_da.rs
similarity index 100%
rename from core/lib/default_da_clients/src/no_da/client.rs
rename to core/node/da_clients/src/no_da.rs
diff --git a/core/lib/default_da_clients/src/object_store/client.rs b/core/node/da_clients/src/object_store.rs
similarity index 51%
rename from core/lib/default_da_clients/src/object_store/client.rs
rename to core/node/da_clients/src/object_store.rs
index f05029a8eb1c..55764e8260e0 100644
--- a/core/lib/default_da_clients/src/object_store/client.rs
+++ b/core/node/da_clients/src/object_store.rs
@@ -1,16 +1,20 @@
-use std::sync::Arc;
+use std::{
+    io::{Read, Write},
+    sync::Arc,
+};
 
 use async_trait::async_trait;
+use flate2::{read::GzDecoder, write::GzEncoder, Compression};
 use zksync_config::ObjectStoreConfig;
 use zksync_da_client::{
     types::{DAError, DispatchResponse, InclusionData},
     DataAvailabilityClient,
 };
-use zksync_object_store::{ObjectStore, ObjectStoreFactory};
+use zksync_object_store::{
+    Bucket, ObjectStore, ObjectStoreFactory, StoredObject, _reexports::BoxedError,
+};
 use zksync_types::L1BatchNumber;
 
-use crate::object_store::types::StorablePubdata;
-
 /// An implementation of the `DataAvailabilityClient` trait that stores the pubdata in the GCS.
 #[derive(Clone, Debug)]
 pub struct ObjectStoreDAClient {
@@ -84,3 +88,79 @@ impl DataAvailabilityClient for ObjectStoreDAClient {
         None
     }
 }
+
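
Editorial note: the `StorablePubdata` wrapper below gzips pubdata on `serialize` and transparently inflates it on `deserialize`. The underlying `flate2` round-trip, isolated as a standalone sketch (not part of the diff):

```rust
use std::io::{Read, Write};

use flate2::{read::GzDecoder, write::GzEncoder, Compression};

fn main() {
    let pubdata = vec![1u8, 2, 3, 4, 5, 6, 123, 255, 0, 0];

    // Compress (what `StorablePubdata::serialize` does).
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(&pubdata).unwrap();
    let compressed = encoder.finish().unwrap();

    // Decompress (what `StorablePubdata::deserialize` does).
    let mut decoder = GzDecoder::new(&compressed[..]);
    let mut roundtripped = Vec::new();
    decoder.read_to_end(&mut roundtripped).unwrap();

    assert_eq!(pubdata, roundtripped);
}
```
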
+/// Used as a wrapper for the pubdata to be stored in the GCS.
+#[derive(Debug)]
+struct StorablePubdata {
+    pub data: Vec<u8>,
+}
+
+impl StoredObject for StorablePubdata {
+    const BUCKET: Bucket = Bucket::DataAvailability;
+    type Key<'a> = L1BatchNumber;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("l1_batch_{key}_pubdata.gzip")
+    }
+
+    fn serialize(&self) -> Result<Vec<u8>, BoxedError> {
+        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
+        encoder.write_all(&self.data[..])?;
+        encoder.finish().map_err(From::from)
+    }
+
+    fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> {
+        let mut decoder = GzDecoder::new(&bytes[..]);
+        let mut decompressed_bytes = Vec::new();
+        decoder
+            .read_to_end(&mut decompressed_bytes)
+            .map_err(BoxedError::from)?;
+
+        Ok(Self {
+            data: decompressed_bytes,
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use tokio::fs;
+    use zksync_object_store::{MockObjectStore, StoredObject};
+    use zksync_types::L1BatchNumber;
+
+    use super::StorablePubdata;
+
+    #[tokio::test]
+    async fn test_storable_pubdata_deserialization() {
+        let serialized = fs::read("./src/test_data/l1_batch_123_pubdata.gzip")
+            .await
+            .unwrap();
+
+        let data = StorablePubdata::deserialize(serialized).unwrap().data;
+        assert_eq!(data[12], 0);
+        assert_eq!(data[123], 129);
+        assert_eq!(data[1234], 153);
+    }
+
+    #[tokio::test]
+    async fn stored_object_serialization() {
+        let batch_number = 123;
+        let data = vec![1, 2, 3, 4, 5, 6, 123, 255, 0, 0];
+
+        let store = MockObjectStore::arc();
+        store
+            .put(
+                L1BatchNumber(batch_number),
+                &StorablePubdata { data: data.clone() },
+            )
+            .await
+            .unwrap();
+
+        let resp = store
+            .get::<StorablePubdata>(L1BatchNumber(batch_number))
+            .await
+            .unwrap();
+
+        assert_eq!(data, resp.data);
+    }
+}
diff --git a/core/node/da_clients/src/test_data/l1_batch_123_pubdata.gzip b/core/node/da_clients/src/test_data/l1_batch_123_pubdata.gzip
new file mode 100644
index 000000000000..78bc67e94efb
Binary files /dev/null and b/core/node/da_clients/src/test_data/l1_batch_123_pubdata.gzip differ
diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs
index ea1858da25d3..f8e6f6b31723 100644
--- a/core/node/da_dispatcher/src/da_dispatcher.rs
+++ b/core/node/da_dispatcher/src/da_dispatcher.rs
@@ -5,7 +5,10 @@ use chrono::Utc;
 use rand::Rng;
 use tokio::sync::watch::Receiver;
 use zksync_config::DADispatcherConfig;
-use zksync_da_client::{types::DAError, DataAvailabilityClient};
+use zksync_da_client::{
+    types::{DAError, InclusionData},
+    DataAvailabilityClient,
+};
 use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_types::L1BatchNumber;
 
@@ -133,16 +136,21 @@ impl DataAvailabilityDispatcher {
             return Ok(());
         };
 
-        let inclusion_data = self
-            .client
-            .get_inclusion_data(blob_info.blob_id.as_str())
-            .await
-            .with_context(|| {
-                format!(
-                    "failed to get inclusion data for blob_id: {}, batch_number: {}",
-                    blob_info.blob_id, blob_info.l1_batch_number
-                )
-            })?;
+        let inclusion_data = if !self.config.use_dummy_inclusion_data() {
+            self.client
+                .get_inclusion_data(blob_info.blob_id.as_str())
+                .await
+                .with_context(|| {
+                    format!(
+                        "failed to get inclusion data for blob_id: {}, batch_number: {}",
+                        blob_info.blob_id, blob_info.l1_batch_number
+                    )
+                })?
+ } else { + // if the inclusion verification is disabled, we don't need to wait for the inclusion + // data before committing the batch, so simply return an empty vector + Some(InclusionData { data: vec![] }) + }; let Some(inclusion_data) = inclusion_data else { return Ok(()); diff --git a/core/node/db_pruner/Cargo.toml b/core/node/db_pruner/Cargo.toml index eb21e3e476db..98eba1b6c0ef 100644 --- a/core/node/db_pruner/Cargo.toml +++ b/core/node/db_pruner/Cargo.toml @@ -26,7 +26,6 @@ serde_json.workspace = true [dev-dependencies] assert_matches.workspace = true -test-casing.workspace = true test-log.workspace = true zksync_node_genesis.workspace = true diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs index de6a6982088b..1e0bd315b9d9 100644 --- a/core/node/eth_sender/src/aggregator.rs +++ b/core/node/eth_sender/src/aggregator.rs @@ -333,16 +333,13 @@ impl Aggregator { // keys that correspond to one on L1. let allowed_patch_versions = storage .protocol_versions_dal() - .get_patch_versions_for_vk( - minor_version, - l1_verifier_config.recursion_scheduler_level_vk_hash, - ) + .get_patch_versions_for_vk(minor_version, l1_verifier_config.snark_wrapper_vk_hash) .await .unwrap(); if allowed_patch_versions.is_empty() { tracing::warn!( "No patch version corresponds to the verification key on L1: {:?}", - l1_verifier_config.recursion_scheduler_level_vk_hash + l1_verifier_config.snark_wrapper_vk_hash ); return None; }; diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 7d6a6b234742..6e9e71d74ea4 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -317,7 +317,7 @@ impl EthTxAggregator { } /// Loads current verifier config on L1 - async fn get_recursion_scheduler_level_vk_hash( + async fn get_snark_wrapper_vk_hash( &mut self, verifier_address: Address, ) -> Result { @@ -344,15 +344,15 @@ impl EthTxAggregator { })?; let contracts_are_pre_shared_bridge = protocol_version_id.is_pre_shared_bridge(); - let recursion_scheduler_level_vk_hash = self - .get_recursion_scheduler_level_vk_hash(verifier_address) + let snark_wrapper_vk_hash = self + .get_snark_wrapper_vk_hash(verifier_address) .await .map_err(|err| { tracing::error!("Failed to get VK hash from the Verifier {err:?}"); err })?; let l1_verifier_config = L1VerifierConfig { - recursion_scheduler_level_vk_hash, + snark_wrapper_vk_hash, }; if let Some(agg_op) = self .aggregator @@ -383,8 +383,14 @@ impl EthTxAggregator { ); return Ok(()); } + let is_gateway = self.settlement_mode.is_gateway(); let tx = self - .save_eth_tx(storage, &agg_op, contracts_are_pre_shared_bridge, false) + .save_eth_tx( + storage, + &agg_op, + contracts_are_pre_shared_bridge, + is_gateway, + ) .await?; Self::report_eth_tx_saving(storage, &agg_op, &tx).await; } @@ -556,9 +562,9 @@ impl EthTxAggregator { // We may be using a custom sender for commit transactions, so use this // var whatever it actually is: a `None` for single-addr operator or `Some` // for multi-addr operator in 4844 mode. 
- let sender_addr = match op_type { - AggregatedActionType::Commit => self.custom_commit_sender_addr, - _ => None, + let sender_addr = match (op_type, is_gateway) { + (AggregatedActionType::Commit, false) => self.custom_commit_sender_addr, + (_, _) => None, }; let nonce = self.get_next_nonce(&mut transaction, sender_addr).await?; let encoded_aggregated_op = diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index a97aed88a0a5..0d78ab71c62d 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -49,13 +49,18 @@ impl EthTxManager { gas_adjuster, max_acceptable_priority_fee_in_gwei: config.max_acceptable_priority_fee_in_gwei, }; + let l1_interface = Box::new(RealL1Interface { + ethereum_gateway, + ethereum_gateway_blobs, + l2_gateway, + wait_confirmations: config.wait_confirmations, + }); + tracing::info!( + "Started eth_tx_manager supporting {:?} operators", + l1_interface.supported_operator_types() + ); Self { - l1_interface: Box::new(RealL1Interface { - ethereum_gateway, - ethereum_gateway_blobs, - l2_gateway, - wait_confirmations: config.wait_confirmations, - }), + l1_interface, config, fees_oracle: Box::new(fees_oracle), pool, @@ -257,10 +262,10 @@ impl EthTxManager { } pub(crate) fn operator_address(&self, operator_type: OperatorType) -> Option
{ - if operator_type == OperatorType::NonBlob { - None - } else { + if operator_type == OperatorType::Blob { self.l1_interface.get_blobs_operator_account() + } else { + None } } // Monitors the in-flight transactions, marks mined ones as confirmed, @@ -519,9 +524,10 @@ impl EthTxManager { tracing::info!("Stop signal received, eth_tx_manager is shutting down"); break; } + let operator_to_track = self.l1_interface.supported_operator_types()[0]; let l1_block_numbers = self .l1_interface - .get_l1_block_numbers(OperatorType::Blob) + .get_l1_block_numbers(operator_to_track) .await?; METRICS.track_block_numbers(&l1_block_numbers); @@ -643,7 +649,7 @@ impl EthTxManager { .get_l1_block_numbers(operator_type) .await .unwrap(); - tracing::info!( + tracing::debug!( "Loop iteration at block {} for {operator_type:?} operator", l1_block_numbers.latest ); diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 8be556b42889..67e603041e6c 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -40,6 +40,7 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { pub const RETRY_LIMIT: usize = 5; const TOO_MANY_RESULTS_INFURA: &str = "query returned more than"; const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; +const TOO_MANY_RESULTS_RETH: &str = "query exceeds max block range"; /// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). #[derive(Debug)] @@ -87,75 +88,34 @@ impl EthHttpQueryClient { } } - async fn get_filter_logs( + fn get_default_address_list(&self) -> Vec
{ + [ + Some(self.diamond_proxy_addr), + Some(self.governance_address), + self.state_transition_manager_address, + self.chain_admin_address, + ] + .into_iter() + .flatten() + .collect() + } + + async fn get_events_inner( &self, from: BlockNumber, to: BlockNumber, - topics: Vec, + topics1: Vec, + topics2: Vec, + addresses: Vec
, + retries_left: usize, ) -> EnrichedClientResult> { let filter = FilterBuilder::default() - .address( - [ - Some(self.diamond_proxy_addr), - Some(self.governance_address), - self.state_transition_manager_address, - self.chain_admin_address, - ] - .into_iter() - .flatten() - .collect(), - ) .from_block(from) .to_block(to) - .topics(Some(topics), None, None, None) + .topics(Some(topics1), Some(topics2), None, None) + .address(addresses) .build(); - self.client.logs(&filter).await - } -} - -#[async_trait::async_trait] -impl EthClient for EthHttpQueryClient { - async fn scheduler_vk_hash( - &self, - verifier_address: Address, - ) -> Result { - // New verifier returns the hash of the verification key. - CallFunctionArgs::new("verificationKeyHash", ()) - .for_contract(verifier_address, &self.verifier_contract_abi) - .call(&self.client) - .await - } - - async fn diamond_cut_by_version( - &self, - packed_version: H256, - ) -> EnrichedClientResult>> { - let Some(state_transition_manager_address) = self.state_transition_manager_address else { - return Ok(None); - }; - - let filter = FilterBuilder::default() - .address(vec![state_transition_manager_address]) - .from_block(BlockNumber::Earliest) - .to_block(BlockNumber::Latest) - .topics( - Some(vec![self.new_upgrade_cut_data_signature]), - Some(vec![packed_version]), - None, - None, - ) - .build(); - let logs = self.client.logs(&filter).await?; - Ok(logs.into_iter().next().map(|log| log.data.0)) - } - - async fn get_events( - &self, - from: BlockNumber, - to: BlockNumber, - retries_left: usize, - ) -> EnrichedClientResult> { - let mut result = self.get_filter_logs(from, to, self.topics.clone()).await; + let mut result = self.client.logs(&filter).await; // This code is compatible with both Infura and Alchemy API providers. // Note: we don't handle rate-limits here - assumption is that we're never going to hit them. @@ -178,6 +138,7 @@ impl EthClient for EthHttpQueryClient { // check whether the error is related to having too many results if err_message.contains(TOO_MANY_RESULTS_INFURA) || err_message.contains(TOO_MANY_RESULTS_ALCHEMY) + || err_message.contains(TOO_MANY_RESULTS_RETH) { // get the numeric block ids let from_number = match from { @@ -223,6 +184,64 @@ impl EthClient for EthHttpQueryClient { result } +} + +#[async_trait::async_trait] +impl EthClient for EthHttpQueryClient { + async fn scheduler_vk_hash( + &self, + verifier_address: Address, + ) -> Result { + // New verifier returns the hash of the verification key. 
+        CallFunctionArgs::new("verificationKeyHash", ())
+            .for_contract(verifier_address, &self.verifier_contract_abi)
+            .call(&self.client)
+            .await
+    }
+
+    async fn diamond_cut_by_version(
+        &self,
+        packed_version: H256,
+    ) -> EnrichedClientResult<Option<Vec<u8>>> {
+        const LOOK_BACK_BLOCK_RANGE: u64 = 1_000_000;
+
+        let Some(state_transition_manager_address) = self.state_transition_manager_address else {
+            return Ok(None);
+        };
+
+        let to_block = self.client.block_number().await?;
+        let from_block = to_block.saturating_sub((LOOK_BACK_BLOCK_RANGE - 1).into());
+
+        let logs = self
+            .get_events_inner(
+                from_block.into(),
+                to_block.into(),
+                vec![self.new_upgrade_cut_data_signature],
+                vec![packed_version],
+                vec![state_transition_manager_address],
+                RETRY_LIMIT,
+            )
+            .await?;
+
+        Ok(logs.into_iter().next().map(|log| log.data.0))
+    }
+
+    async fn get_events(
+        &self,
+        from: BlockNumber,
+        to: BlockNumber,
+        retries_left: usize,
+    ) -> EnrichedClientResult<Vec<Log>> {
+        self.get_events_inner(
+            from,
+            to,
+            self.topics.clone(),
+            Vec::new(),
+            self.get_default_address_list(),
+            retries_left,
+        )
+        .await
+    }
 
     async fn finalized_block_number(&self) -> EnrichedClientResult<u64> {
         if let Some(confirmations) = self.confirmations_for_eth_event {
diff --git a/core/node/external_proof_integration_api/Cargo.toml b/core/node/external_proof_integration_api/Cargo.toml
index 2e8176cd8832..679e60a11727 100644
--- a/core/node/external_proof_integration_api/Cargo.toml
+++ b/core/node/external_proof_integration_api/Cargo.toml
@@ -11,11 +11,12 @@ keywords.workspace = true
 categories.workspace = true
 
 [dependencies]
-axum.workspace = true
+axum = { workspace = true, features = ["multipart"] }
+async-trait.workspace = true
 tracing.workspace = true
+thiserror.workspace = true
 zksync_prover_interface.workspace = true
 zksync_basic_types.workspace = true
-zksync_config.workspace = true
 zksync_object_store.workspace = true
 zksync_dal.workspace = true
 tokio.workspace = true
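
Editorial note: the `LOOK_BACK_BLOCK_RANGE` arithmetic in `diamond_cut_by_version` above deserves a quick sanity check; the `- 1` keeps the scanned window at exactly one million blocks inclusive (standalone sketch, not part of the diff):

```rust
fn main() {
    const LOOK_BACK_BLOCK_RANGE: u64 = 1_000_000;
    // Mirrors `diamond_cut_by_version`: scan the trailing million blocks.
    let to_block: u64 = 20_500_000;
    let from_block = to_block.saturating_sub(LOOK_BACK_BLOCK_RANGE - 1);
    assert_eq!(from_block, 19_500_001);
    assert_eq!(to_block - from_block + 1, LOOK_BACK_BLOCK_RANGE);
    // Chains shorter than the window clamp to genesis.
    assert_eq!(100u64.saturating_sub(LOOK_BACK_BLOCK_RANGE - 1), 0);
}
```
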
diff --git a/core/node/external_proof_integration_api/src/error.rs b/core/node/external_proof_integration_api/src/error.rs
index dac8e2a27ed6..505130048cc3 100644
--- a/core/node/external_proof_integration_api/src/error.rs
+++ b/core/node/external_proof_integration_api/src/error.rs
@@ -6,81 +6,74 @@ use zksync_basic_types::L1BatchNumber;
 use zksync_dal::DalError;
 use zksync_object_store::ObjectStoreError;
 
+#[derive(Debug, thiserror::Error)]
 pub(crate) enum ProcessorError {
-    ObjectStore(ObjectStoreError),
-    Dal(DalError),
-    Serialization(bincode::Error),
+    #[error("Failed to deserialize proof data")]
+    Serialization(#[from] bincode::Error),
+    #[error("Invalid proof submitted")]
     InvalidProof,
+    #[error("Batch {0} is not yet ready for proving. Most likely our proof for this batch is not generated yet, try again later")]
     BatchNotReady(L1BatchNumber),
+    #[error("Invalid file: {0}")]
+    InvalidFile(#[from] FileError),
+    #[error("Internal error")]
+    Internal,
+    #[error("Proof verification not possible anymore, batch is too old")]
+    ProofIsGone,
 }
 
-impl From<ObjectStoreError> for ProcessorError {
-    fn from(err: ObjectStoreError) -> Self {
-        Self::ObjectStore(err)
+impl ProcessorError {
+    fn status_code(&self) -> StatusCode {
+        match self {
+            Self::Internal => StatusCode::INTERNAL_SERVER_ERROR,
+            Self::Serialization(_) => StatusCode::BAD_REQUEST,
+            Self::InvalidProof => StatusCode::BAD_REQUEST,
+            Self::InvalidFile(_) => StatusCode::BAD_REQUEST,
+            Self::BatchNotReady(_) => StatusCode::NOT_FOUND,
+            Self::ProofIsGone => StatusCode::GONE,
+        }
     }
 }
 
-impl From<DalError> for ProcessorError {
-    fn from(err: DalError) -> Self {
-        Self::Dal(err)
+impl IntoResponse for ProcessorError {
+    fn into_response(self) -> Response {
+        (self.status_code(), self.to_string()).into_response()
     }
 }
 
-impl From<bincode::Error> for ProcessorError {
-    fn from(err: bincode::Error) -> Self {
-        Self::Serialization(err)
+impl From<ObjectStoreError> for ProcessorError {
+    fn from(err: ObjectStoreError) -> Self {
+        match err {
+            ObjectStoreError::KeyNotFound(_) => {
+                tracing::debug!("Too old proof was requested: {:?}", err);
+                Self::ProofIsGone
+            }
+            _ => {
+                tracing::warn!("GCS error: {:?}", err);
+                Self::Internal
+            }
+        }
     }
 }
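
Editorial note: since `IntoResponse` above now simply pairs `status_code()` with the `thiserror`-derived `Display` text, the HTTP body for each variant is exactly its `#[error(...)]` string. A quick standalone check of that behavior (sketch only, not part of the diff):

```rust
use thiserror::Error;

#[derive(Debug, Error)]
enum DemoError {
    #[error("Proof verification not possible anymore, batch is too old")]
    ProofIsGone,
    #[error("Batch {0} is not yet ready for proving")]
    BatchNotReady(u32),
}

fn main() {
    // `thiserror` turns the `#[error(...)]` attribute into the `Display` impl,
    // which is what `(status_code, self.to_string()).into_response()` sends.
    assert_eq!(
        DemoError::ProofIsGone.to_string(),
        "Proof verification not possible anymore, batch is too old"
    );
    assert_eq!(
        DemoError::BatchNotReady(42).to_string(),
        "Batch 42 is not yet ready for proving"
    );
}
```
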
 
-impl IntoResponse for ProcessorError {
-    fn into_response(self) -> Response {
-        let (status_code, message) = match self {
-            ProcessorError::ObjectStore(err) => {
-                tracing::error!("GCS error: {:?}", err);
-                match err {
-                    ObjectStoreError::KeyNotFound(_) => (
-                        StatusCode::NOT_FOUND,
-                        "Proof verification not possible anymore, batch is too old.".to_owned(),
-                    ),
-                    _ => (
-                        StatusCode::INTERNAL_SERVER_ERROR,
-                        "Failed fetching from GCS".to_owned(),
-                    ),
-                }
-            }
-            ProcessorError::Dal(err) => {
-                tracing::error!("Sqlx error: {:?}", err);
-                match err.inner() {
-                    zksync_dal::SqlxError::RowNotFound => {
-                        (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned())
-                    }
-                    _ => (
-                        StatusCode::INTERNAL_SERVER_ERROR,
-                        "Failed fetching/saving from db".to_owned(),
-                    ),
-                }
-            }
-            ProcessorError::Serialization(err) => {
-                tracing::error!("Serialization error: {:?}", err);
-                (
-                    StatusCode::BAD_REQUEST,
-                    "Failed to deserialize proof data".to_owned(),
-                )
-            }
-            ProcessorError::BatchNotReady(l1_batch_number) => {
-                tracing::error!(
-                    "Batch {l1_batch_number:?} is not yet ready for proving. Most likely our proof for this batch is not generated yet"
-                );
-                (
-                    StatusCode::INTERNAL_SERVER_ERROR,
-                    format!("Batch {l1_batch_number:?} is not yet ready for proving. Most likely our proof for this batch is not generated yet, try again later"),
-                )
-            }
-            ProcessorError::InvalidProof => {
-                tracing::error!("Invalid proof data");
-                (StatusCode::BAD_REQUEST, "Invalid proof data".to_owned())
-            }
-        };
-        (status_code, message).into_response()
+impl From<DalError> for ProcessorError {
+    fn from(_err: DalError) -> Self {
+        // We don't want to check if the error is `RowNotFound`: we check that batch exists before
+        // processing a request, so it's handled separately.
+        // Thus, any unhandled error from DAL is an internal error.
+        Self::Internal
     }
 }
+
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum FileError {
+    #[error("Multipart error: {0}")]
+    MultipartRejection(#[from] axum::extract::multipart::MultipartRejection),
+    #[error("Multipart error: {0}")]
+    Multipart(#[from] axum::extract::multipart::MultipartError),
+    #[error("File not found in request. It was expected to be in the field {field_name} with the content type {content_type}")]
+    FileNotFound {
+        field_name: &'static str,
+        content_type: &'static str,
+    },
+}
diff --git a/core/node/external_proof_integration_api/src/lib.rs b/core/node/external_proof_integration_api/src/lib.rs
index b1ef33b44c10..4355896e2a2e 100644
--- a/core/node/external_proof_integration_api/src/lib.rs
+++ b/core/node/external_proof_integration_api/src/lib.rs
@@ -1,35 +1,81 @@
 mod error;
 mod metrics;
+mod middleware;
 mod processor;
+mod types;
 
-use std::{net::SocketAddr, sync::Arc};
+use std::net::SocketAddr;
 
 use anyhow::Context;
-use axum::{extract::Path, routing::post, Json, Router};
+use axum::{
+    extract::{Path, Request, State},
+    middleware::Next,
+    routing::{get, post},
+    Router,
+};
+use error::ProcessorError;
 use tokio::sync::watch;
-use zksync_basic_types::commitment::L1BatchCommitmentMode;
-use zksync_config::configs::external_proof_integration_api::ExternalProofIntegrationApiConfig;
-use zksync_dal::{ConnectionPool, Core};
-use zksync_object_store::ObjectStore;
-use zksync_prover_interface::api::{OptionalProofGenerationDataRequest, VerifyProofRequest};
+use types::{ExternalProof, ProofGenerationDataResponse};
+use zksync_basic_types::L1BatchNumber;
 
-use crate::processor::Processor;
+pub use crate::processor::Processor;
+use crate::{
+    metrics::{CallOutcome, Method},
+    middleware::MetricsMiddleware,
+};
 
-pub async fn run_server(
-    config: ExternalProofIntegrationApiConfig,
-    blob_store: Arc<dyn ObjectStore>,
-    connection_pool: ConnectionPool<Core>,
-    commitment_mode: L1BatchCommitmentMode,
-    mut stop_receiver: watch::Receiver<bool>,
-) -> anyhow::Result<()> {
-    let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port));
-    tracing::debug!("Starting external prover API server on {bind_address}");
-    let app = create_router(blob_store, connection_pool, commitment_mode).await;
+/// External API implementation.
+#[derive(Debug)] +pub struct Api { + router: Router, + port: u16, +} - let listener = tokio::net::TcpListener::bind(bind_address) - .await - .with_context(|| format!("Failed binding external prover API server to {bind_address}"))?; - axum::serve(listener, app) +impl Api { + pub fn new(processor: Processor, port: u16) -> Self { + let middleware_factory = |method: Method| { + axum::middleware::from_fn(move |req: Request, next: Next| async move { + let middleware = MetricsMiddleware::new(method); + let response = next.run(req).await; + let outcome = match response.status().is_success() { + true => CallOutcome::Success, + false => CallOutcome::Failure, + }; + middleware.observe(outcome); + response + }) + }; + + let router = Router::new() + .route( + "/proof_generation_data", + get(Api::latest_generation_data) + .layer(middleware_factory(Method::GetLatestProofGenerationData)), + ) + .route( + "/proof_generation_data/:l1_batch_number", + get(Api::generation_data_for_existing_batch) + .layer(middleware_factory(Method::GetSpecificProofGenerationData)), + ) + .route( + "/verify_proof/:l1_batch_number", + post(Api::verify_proof).layer(middleware_factory(Method::VerifyProof)), + ) + .with_state(processor); + + Self { router, port } + } + + pub async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let bind_address = SocketAddr::from(([0, 0, 0, 0], self.port)); + tracing::info!("Starting external prover API server on {bind_address}"); + + let listener = tokio::net::TcpListener::bind(bind_address) + .await + .with_context(|| { + format!("Failed binding external prover API server to {bind_address}") + })?; + axum::serve(listener, self.router) .with_graceful_shutdown(async move { if stop_receiver.changed().await.is_err() { tracing::warn!("Stop signal sender for external prover API server was dropped without sending a signal"); @@ -38,37 +84,32 @@ pub async fn run_server( }) .await .context("External prover API server failed")?; - tracing::info!("External prover API server shut down"); - Ok(()) -} + tracing::info!("External prover API server shut down"); + Ok(()) + } + + async fn latest_generation_data( + State(processor): State, + ) -> Result { + processor.get_proof_generation_data().await + } + + async fn generation_data_for_existing_batch( + State(processor): State, + Path(l1_batch_number): Path, + ) -> Result { + processor + .proof_generation_data_for_existing_batch(L1BatchNumber(l1_batch_number)) + .await + } -async fn create_router( - blob_store: Arc, - connection_pool: ConnectionPool, - commitment_mode: L1BatchCommitmentMode, -) -> Router { - let mut processor = - Processor::new(blob_store.clone(), connection_pool.clone(), commitment_mode); - let verify_proof_processor = processor.clone(); - Router::new() - .route( - "/proof_generation_data", - post( - // we use post method because the returned data is not idempotent, - // i.e we return different result on each call. 
- move |payload: Json| async move { - processor.get_proof_generation_data(payload).await - }, - ), - ) - .route( - "/verify_proof/:l1_batch_number", - post( - move |l1_batch_number: Path, payload: Json| async move { - verify_proof_processor - .verify_proof(l1_batch_number, payload) - .await - }, - ), - ) + async fn verify_proof( + State(processor): State, + Path(l1_batch_number): Path, + proof: ExternalProof, + ) -> Result<(), ProcessorError> { + processor + .verify_proof(L1BatchNumber(l1_batch_number), proof) + .await + } } diff --git a/core/node/external_proof_integration_api/src/metrics.rs b/core/node/external_proof_integration_api/src/metrics.rs index 70815f542a05..f43b49b7b1c0 100644 --- a/core/node/external_proof_integration_api/src/metrics.rs +++ b/core/node/external_proof_integration_api/src/metrics.rs @@ -1,6 +1,5 @@ use std::time::Duration; -use tokio::time::Instant; use vise::{EncodeLabelSet, EncodeLabelValue, Histogram, LabeledFamily, Metrics}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] @@ -25,31 +24,5 @@ pub(crate) struct ProofIntegrationApiMetrics { pub call_latency: LabeledFamily<(Method, CallOutcome), Histogram, 2>, } -pub(crate) struct MethodCallGuard { - method_type: Method, - outcome: CallOutcome, - started_at: Instant, -} - -impl MethodCallGuard { - pub(crate) fn new(method_type: Method) -> Self { - MethodCallGuard { - method_type, - outcome: CallOutcome::Failure, - started_at: Instant::now(), - } - } - - pub(crate) fn mark_successful(&mut self) { - self.outcome = CallOutcome::Success; - } -} - -impl Drop for MethodCallGuard { - fn drop(&mut self) { - METRICS.call_latency[&(self.method_type, self.outcome)].observe(self.started_at.elapsed()); - } -} - #[vise::register] pub(crate) static METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/external_proof_integration_api/src/middleware.rs b/core/node/external_proof_integration_api/src/middleware.rs new file mode 100644 index 000000000000..1dc6aefe9171 --- /dev/null +++ b/core/node/external_proof_integration_api/src/middleware.rs @@ -0,0 +1,22 @@ +use tokio::time::Instant; + +use crate::metrics::{CallOutcome, Method, METRICS}; + +#[derive(Debug)] +pub(crate) struct MetricsMiddleware { + method: Method, + started_at: Instant, +} + +impl MetricsMiddleware { + pub fn new(method: Method) -> MetricsMiddleware { + MetricsMiddleware { + method, + started_at: Instant::now(), + } + } + + pub fn observe(&self, outcome: CallOutcome) { + METRICS.call_latency[&(self.method, outcome)].observe(self.started_at.elapsed()); + } +} diff --git a/core/node/external_proof_integration_api/src/processor.rs b/core/node/external_proof_integration_api/src/processor.rs index e9e56df4a068..b70b590df9fc 100644 --- a/core/node/external_proof_integration_api/src/processor.rs +++ b/core/node/external_proof_integration_api/src/processor.rs @@ -1,16 +1,12 @@ use std::sync::Arc; -use axum::{extract::Path, Json}; use zksync_basic_types::{ basic_fri_types::Eip4844Blobs, commitment::L1BatchCommitmentMode, L1BatchNumber, }; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_object_store::{bincode, ObjectStore}; +use zksync_object_store::ObjectStore; use zksync_prover_interface::{ - api::{ - OptionalProofGenerationDataRequest, ProofGenerationData, ProofGenerationDataResponse, - VerifyProofRequest, - }, + api::ProofGenerationData, inputs::{ L1BatchMetadataHashes, VMRunWitnessInputData, WitnessInputData, WitnessInputMerklePaths, }, @@ -19,18 +15,19 @@ use zksync_prover_interface::{ use crate::{ 
error::ProcessorError, - metrics::{Method, MethodCallGuard}, + types::{ExternalProof, ProofGenerationDataResponse}, }; +/// Backend-agnostic implementation of the API logic. #[derive(Clone)] -pub(crate) struct Processor { +pub struct Processor { blob_store: Arc, pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, } impl Processor { - pub(crate) fn new( + pub fn new( blob_store: Arc, pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, @@ -44,124 +41,90 @@ impl Processor { pub(crate) async fn verify_proof( &self, - Path(l1_batch_number): Path, - Json(payload): Json, + l1_batch_number: L1BatchNumber, + proof: ExternalProof, ) -> Result<(), ProcessorError> { - let mut guard = MethodCallGuard::new(Method::VerifyProof); + let expected_proof = self + .blob_store + .get::((l1_batch_number, proof.protocol_version())) + .await?; + proof.verify(expected_proof)?; + Ok(()) + } - let l1_batch_number = L1BatchNumber(l1_batch_number); - tracing::info!( - "Received request to verify proof for batch: {:?}", + pub(crate) async fn get_proof_generation_data( + &self, + ) -> Result { + tracing::debug!("Received request for proof generation data"); + let latest_available_batch = self.latest_available_batch().await?; + self.proof_generation_data_for_existing_batch_internal(latest_available_batch) + .await + .map(ProofGenerationDataResponse) + } + + pub(crate) async fn proof_generation_data_for_existing_batch( + &self, + l1_batch_number: L1BatchNumber, + ) -> Result { + tracing::debug!( + "Received request for proof generation data for batch: {:?}", l1_batch_number ); - let serialized_proof = bincode::serialize(&payload.0)?; - let expected_proof = bincode::serialize( - &self - .blob_store - .get::((l1_batch_number, payload.0.protocol_version)) - .await?, - )?; + let latest_available_batch = self.latest_available_batch().await?; - if serialized_proof != expected_proof { - return Err(ProcessorError::InvalidProof); + if l1_batch_number > latest_available_batch { + tracing::error!( + "Requested batch is not available: {:?}, latest available batch is {:?}", + l1_batch_number, + latest_available_batch + ); + return Err(ProcessorError::BatchNotReady(l1_batch_number)); } - guard.mark_successful(); - - Ok(()) + self.proof_generation_data_for_existing_batch_internal(l1_batch_number) + .await + .map(ProofGenerationDataResponse) } - #[tracing::instrument(skip_all)] - pub(crate) async fn get_proof_generation_data( - &mut self, - request: Json, - ) -> Result, ProcessorError> { - tracing::info!("Received request for proof generation data: {:?}", request); - - let mut guard = match request.0 .0 { - Some(_) => MethodCallGuard::new(Method::GetSpecificProofGenerationData), - None => MethodCallGuard::new(Method::GetLatestProofGenerationData), - }; - - let latest_available_batch = self + async fn latest_available_batch(&self) -> Result { + Ok(self .pool .connection() .await .unwrap() .proof_generation_dal() .get_latest_proven_batch() - .await?; - - let l1_batch_number = if let Some(l1_batch_number) = request.0 .0 { - if l1_batch_number > latest_available_batch { - tracing::error!( - "Requested batch is not available: {:?}, latest available batch is {:?}", - l1_batch_number, - latest_available_batch - ); - return Err(ProcessorError::BatchNotReady(l1_batch_number)); - } - l1_batch_number - } else { - latest_available_batch - }; - - let proof_generation_data = self - .proof_generation_data_for_existing_batch(l1_batch_number) - .await; - - match proof_generation_data { - Ok(data) => { - guard.mark_successful(); - - 
Ok(Json(ProofGenerationDataResponse::Success(Some(Box::new( - data, - ))))) - } - Err(err) => Err(err), - } + .await?) } - #[tracing::instrument(skip(self))] - async fn proof_generation_data_for_existing_batch( + async fn proof_generation_data_for_existing_batch_internal( &self, l1_batch_number: L1BatchNumber, ) -> Result { - let vm_run_data: VMRunWitnessInputData = self - .blob_store - .get(l1_batch_number) - .await - .map_err(ProcessorError::ObjectStore)?; - let merkle_paths: WitnessInputMerklePaths = self - .blob_store - .get(l1_batch_number) - .await - .map_err(ProcessorError::ObjectStore)?; + let vm_run_data: VMRunWitnessInputData = self.blob_store.get(l1_batch_number).await?; + let merkle_paths: WitnessInputMerklePaths = self.blob_store.get(l1_batch_number).await?; // Acquire connection after interacting with GCP, to avoid holding the connection for too long. - let mut conn = self.pool.connection().await.map_err(ProcessorError::Dal)?; + let mut conn = self.pool.connection().await?; let previous_batch_metadata = conn .blocks_dal() .get_l1_batch_metadata(L1BatchNumber(l1_batch_number.checked_sub(1).unwrap())) - .await - .map_err(ProcessorError::Dal)? + .await? .expect("No metadata for previous batch"); let header = conn .blocks_dal() .get_l1_batch_header(l1_batch_number) - .await - .map_err(ProcessorError::Dal)? + .await? .unwrap_or_else(|| panic!("Missing header for {}", l1_batch_number)); let minor_version = header.protocol_version.unwrap(); let protocol_version = conn .protocol_versions_dal() .get_protocol_version_with_latest_patch(minor_version) - .await - .map_err(ProcessorError::Dal)? + .await? .unwrap_or_else(|| { panic!("Missing l1 verifier info for protocol version {minor_version}") }); @@ -169,8 +132,7 @@ impl Processor { let batch_header = conn .blocks_dal() .get_l1_batch_header(l1_batch_number) - .await - .map_err(ProcessorError::Dal)? + .await? 
.unwrap_or_else(|| panic!("Missing header for {}", l1_batch_number)); let eip_4844_blobs = match self.commitment_mode { diff --git a/core/node/external_proof_integration_api/src/types.rs b/core/node/external_proof_integration_api/src/types.rs new file mode 100644 index 000000000000..16d562d4a3db --- /dev/null +++ b/core/node/external_proof_integration_api/src/types.rs @@ -0,0 +1,105 @@ +use axum::{ + extract::{FromRequest, Multipart, Request}, + http::header, + response::{IntoResponse, Response}, +}; +use zksync_basic_types::protocol_version::ProtocolSemanticVersion; +use zksync_prover_interface::{api::ProofGenerationData, outputs::L1BatchProofForL1}; + +use crate::error::{FileError, ProcessorError}; + +#[derive(Debug)] +pub(crate) struct ProofGenerationDataResponse(pub ProofGenerationData); + +impl IntoResponse for ProofGenerationDataResponse { + fn into_response(self) -> Response { + let l1_batch_number = self.0.l1_batch_number; + let data = match bincode::serialize(&self.0.witness_input_data) { + Ok(data) => data, + Err(err) => { + return ProcessorError::Serialization(err).into_response(); + } + }; + + let headers = [ + (header::CONTENT_TYPE, "application/octet-stream"), + ( + header::CONTENT_DISPOSITION, + &format!( + "attachment; filename=\"witness_inputs_{}.bin\"", + l1_batch_number.0 + ), + ), + ]; + (headers, data).into_response() + } +} + +#[derive(Debug)] +pub(crate) struct ExternalProof { + raw: Vec, + protocol_version: ProtocolSemanticVersion, +} + +impl ExternalProof { + const FIELD_NAME: &'static str = "proof"; + const CONTENT_TYPE: &'static str = "application/octet-stream"; + + pub fn protocol_version(&self) -> ProtocolSemanticVersion { + self.protocol_version + } + + pub fn verify(&self, correct: L1BatchProofForL1) -> Result<(), ProcessorError> { + if correct.protocol_version != self.protocol_version { + return Err(ProcessorError::InvalidProof); + } + + if bincode::serialize(&correct)? != self.raw { + return Err(ProcessorError::InvalidProof); + } + + Ok(()) + } + + async fn extract_from_multipart( + req: Request, + state: &S, + ) -> Result, FileError> { + let mut multipart = Multipart::from_request(req, state).await?; + + let mut serialized_proof = vec![]; + while let Some(field) = multipart.next_field().await? 
{ + if field.name() == Some(Self::FIELD_NAME) + && field.content_type() == Some(Self::CONTENT_TYPE) + { + serialized_proof = field.bytes().await?.to_vec(); + break; + } + } + + if serialized_proof.is_empty() { + // No proof field found + return Err(FileError::FileNotFound { + field_name: Self::FIELD_NAME, + content_type: Self::CONTENT_TYPE, + }); + } + + Ok(serialized_proof) + } +} + +#[async_trait::async_trait] +impl FromRequest for ExternalProof { + type Rejection = ProcessorError; + + async fn from_request(req: Request, state: &S) -> Result { + let serialized_proof = Self::extract_from_multipart(req, state).await?; + let proof: L1BatchProofForL1 = bincode::deserialize(&serialized_proof)?; + + Ok(Self { + raw: serialized_proof, + protocol_version: proof.protocol_version, + }) + } +} diff --git a/core/node/fee_model/Cargo.toml b/core/node/fee_model/Cargo.toml index 09048515e7a0..8760b97d9db3 100644 --- a/core/node/fee_model/Cargo.toml +++ b/core/node/fee_model/Cargo.toml @@ -27,4 +27,3 @@ tracing.workspace = true [dev-dependencies] test-casing.workspace = true -zksync_node_test_utils.workspace = true diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 4ed9cf1330ea..e6842b92fdba 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -86,7 +86,7 @@ impl GasAdjuster { anyhow::ensure!( matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata), - "Only relayed L2 calldata is available for L2 mode" + "Only relayed L2 calldata is available for L2 mode, got: {pubdata_sending_mode:?}" ); } else { anyhow::ensure!(!client.gateway_mode, "Must be L1 client in L1 mode"); diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index bbad6b9a2223..1f30d314bb06 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -6,9 +6,12 @@ use std::fmt::Formatter; use anyhow::Context as _; use zksync_config::GenesisConfig; -use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SET_CHAIN_ID_EVENT}; +use zksync_contracts::{ + hyperchain_contract, verifier_contract, BaseSystemContracts, BaseSystemContractsHashes, + SET_CHAIN_ID_EVENT, +}; use zksync_dal::{Connection, Core, CoreDal, DalError}; -use zksync_eth_client::EthInterface; +use zksync_eth_client::{CallFunctionArgs, EthInterface}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; use zksync_multivm::utils::get_max_gas_per_pubdata_byte; use zksync_system_constants::PRIORITY_EXPIRATION; @@ -21,7 +24,7 @@ use zksync_types::{ system_contracts::get_system_smart_contracts, web3::{BlockNumber, FilterBuilder}, AccountTreeId, Address, Bloom, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, - ProtocolVersion, ProtocolVersionId, StorageKey, H256, + ProtocolVersion, ProtocolVersionId, StorageKey, H256, U256, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; @@ -110,12 +113,9 @@ impl GenesisParams { }, ))); } - // Try to convert value from config to the real protocol version and return error - // if the version doesn't exist - let _: ProtocolVersionId = config - .protocol_version - .map(|p| p.minor) - .ok_or(GenesisError::MalformedConfig("protocol_version"))?; + if config.protocol_version.is_none() { + return Err(GenesisError::MalformedConfig("protocol_version")); + } Ok(GenesisParams { base_system_contracts, system_contracts, @@ -175,8 +175,7 @@ pub fn mock_genesis_config() -> GenesisConfig { l1_chain_id: 
L1ChainId(9), sl_chain_id: None, l2_chain_id: L2ChainId::default(), - recursion_scheduler_level_vk_hash: first_l1_verifier_config - .recursion_scheduler_level_vk_hash, + snark_wrapper_vk_hash: first_l1_verifier_config.snark_wrapper_vk_hash, fee_account: Default::default(), dummy_verifier: false, l1_batch_commit_data_generator_mode: Default::default(), @@ -190,7 +189,7 @@ pub async fn insert_genesis_batch( ) -> Result { let mut transaction = storage.start_transaction().await?; let verifier_config = L1VerifierConfig { - recursion_scheduler_level_vk_hash: genesis_params.config.recursion_scheduler_level_vk_hash, + snark_wrapper_vk_hash: genesis_params.config.snark_wrapper_vk_hash, }; create_genesis_l1_batch( @@ -264,6 +263,49 @@ pub async fn is_genesis_needed(storage: &mut Connection<'_, Core>) -> Result anyhow::Result<()> { + let hyperchain_abi = hyperchain_contract(); + let verifier_abi = verifier_contract(); + + let packed_protocol_version: U256 = CallFunctionArgs::new("getProtocolVersion", ()) + .for_contract(diamond_proxy_address, &hyperchain_abi) + .call(query_client) + .await?; + + let protocol_version = ProtocolSemanticVersion::try_from_packed(packed_protocol_version) + .map_err(|err| anyhow::format_err!("Failed to unpack semver: {err}"))?; + + if protocol_version != genesis_params.protocol_version() { + return Err(anyhow::anyhow!( + "Protocol version mismatch: {protocol_version} on contract, {} in config", + genesis_params.protocol_version() + )); + } + + let verifier_address: Address = CallFunctionArgs::new("getVerifier", ()) + .for_contract(diamond_proxy_address, &hyperchain_abi) + .call(query_client) + .await?; + + let verification_key_hash: H256 = CallFunctionArgs::new("verificationKeyHash", ()) + .for_contract(verifier_address, &verifier_abi) + .call(query_client) + .await?; + + if verification_key_hash != genesis_params.config().snark_wrapper_vk_hash { + return Err(anyhow::anyhow!( + "Verification key hash mismatch: {verification_key_hash:?} on contract, {:?} in config", + genesis_params.config().snark_wrapper_vk_hash + )); + } + + Ok(()) +} + pub async fn ensure_genesis_state( storage: &mut Connection<'_, Core>, genesis_params: &GenesisParams, diff --git a/core/node/house_keeper/Cargo.toml b/core/node/house_keeper/Cargo.toml index ed86a713ea25..b2ed3c14c20f 100644 --- a/core/node/house_keeper/Cargo.toml +++ b/core/node/house_keeper/Cargo.toml @@ -14,7 +14,6 @@ categories.workspace = true vise.workspace = true zksync_dal.workspace = true zksync_shared_metrics.workspace = true -zksync_prover_dal.workspace = true zksync_types.workspace = true zksync_config.workspace = true diff --git a/core/node/house_keeper/src/blocks_state_reporter.rs b/core/node/house_keeper/src/blocks_state_reporter.rs index 5285390a2783..6f85aa0fbb09 100644 --- a/core/node/house_keeper/src/blocks_state_reporter.rs +++ b/core/node/house_keeper/src/blocks_state_reporter.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_shared_metrics::{BlockL1Stage, BlockStage, L1StageLatencyLabel, APP_METRICS}; -use crate::periodic_job::PeriodicJob; +use crate::{metrics::FRI_PROVER_METRICS, periodic_job::PeriodicJob}; #[derive(Debug)] pub struct L1BatchMetricsReporter { @@ -88,6 +88,37 @@ impl L1BatchMetricsReporter { APP_METRICS.blocks_state_block_eth_stage_latency[&L1StageLatencyLabel::UnexecutedBlock] .set(now.saturating_sub(timestamp)); } + + // proof generation details metrics + let oldest_unpicked_batch = match conn + .proof_generation_dal() + 
.get_oldest_unpicked_batch() + .await? + { + Some(l1_batch_number) => l1_batch_number.0 as u64, + // if there is no unpicked batch in database, we use sealed batch number as a result + None => { + conn.blocks_dal() + .get_sealed_l1_batch_number() + .await + .unwrap() + .unwrap() + .0 as u64 + } + }; + FRI_PROVER_METRICS + .oldest_unpicked_batch + .set(oldest_unpicked_batch); + + if let Some(l1_batch_number) = conn + .proof_generation_dal() + .get_oldest_not_generated_batch() + .await? + { + FRI_PROVER_METRICS + .oldest_not_generated_batch + .set(l1_batch_number.0 as u64); + } Ok(()) } } diff --git a/core/node/house_keeper/src/lib.rs b/core/node/house_keeper/src/lib.rs index 68d4ad2f8ba4..4e0d1962fc02 100644 --- a/core/node/house_keeper/src/lib.rs +++ b/core/node/house_keeper/src/lib.rs @@ -1,3 +1,3 @@ pub mod blocks_state_reporter; +mod metrics; pub mod periodic_job; -pub mod prover; diff --git a/core/node/house_keeper/src/metrics.rs b/core/node/house_keeper/src/metrics.rs new file mode 100644 index 000000000000..cc1438e35963 --- /dev/null +++ b/core/node/house_keeper/src/metrics.rs @@ -0,0 +1,11 @@ +use vise::{Gauge, Metrics}; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "fri_prover")] +pub(crate) struct FriProverMetrics { + pub oldest_unpicked_batch: Gauge, + pub oldest_not_generated_batch: Gauge, +} + +#[vise::register] +pub(crate) static FRI_PROVER_METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs b/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs deleted file mode 100644 index b0f5ff23fe3f..000000000000 --- a/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::time::Duration; - -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::HOUSE_KEEPER_METRICS}; - -/// `FriGpuProverArchiver` is a task that periodically archives old fri GPU prover records. -/// The task will archive the `dead` prover records that have not been updated for a certain amount of time. -/// Note: These components speed up provers, in their absence, queries would become sub optimal. 
-#[derive(Debug)] -pub struct FriGpuProverArchiver { - pool: ConnectionPool, - archiving_interval_ms: u64, - archive_prover_after_secs: u64, -} - -impl FriGpuProverArchiver { - pub fn new( - pool: ConnectionPool, - archiving_interval_ms: u64, - archive_prover_after_secs: u64, - ) -> Self { - Self { - pool, - archiving_interval_ms, - archive_prover_after_secs, - } - } -} - -#[async_trait::async_trait] -impl PeriodicJob for FriGpuProverArchiver { - const SERVICE_NAME: &'static str = "FriGpuProverArchiver"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let archived_provers = self - .pool - .connection() - .await - .unwrap() - .fri_gpu_prover_queue_dal() - .archive_old_provers(Duration::from_secs(self.archive_prover_after_secs)) - .await; - tracing::info!("Archived {:?} fri gpu prover records", archived_provers); - HOUSE_KEEPER_METRICS - .gpu_prover_archived - .inc_by(archived_provers as u64); - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.archiving_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs b/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs deleted file mode 100644 index 684c955231cf..000000000000 --- a/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::time::Duration; - -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::HOUSE_KEEPER_METRICS}; - -/// `FriProverJobsArchiver` is a task that periodically archives old finalized prover job. -/// The task will archive the `successful` prover jobs that have been done for a certain amount of time. -/// Note: These components speed up provers, in their absence, queries would become sub optimal. 
-#[derive(Debug)] -pub struct FriProverJobsArchiver { - pool: ConnectionPool, - reporting_interval_ms: u64, - archiving_interval_secs: u64, -} - -impl FriProverJobsArchiver { - pub fn new( - pool: ConnectionPool, - reporting_interval_ms: u64, - archiving_interval_secs: u64, - ) -> Self { - Self { - pool, - reporting_interval_ms, - archiving_interval_secs, - } - } -} - -#[async_trait::async_trait] -impl PeriodicJob for FriProverJobsArchiver { - const SERVICE_NAME: &'static str = "FriProverJobsArchiver"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let archived_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_prover_jobs_dal() - .archive_old_jobs(Duration::from_secs(self.archiving_interval_secs)) - .await; - tracing::info!("Archived {:?} fri prover jobs", archived_jobs); - HOUSE_KEEPER_METRICS - .prover_job_archived - .inc_by(archived_jobs as u64); - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.reporting_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/archiver/mod.rs b/core/node/house_keeper/src/prover/archiver/mod.rs deleted file mode 100644 index 36b82a7735ce..000000000000 --- a/core/node/house_keeper/src/prover/archiver/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod fri_gpu_prover_archiver; -mod fri_prover_jobs_archiver; - -pub use fri_gpu_prover_archiver::FriGpuProverArchiver; -pub use fri_prover_jobs_archiver::FriProverJobsArchiver; diff --git a/core/node/house_keeper/src/prover/metrics.rs b/core/node/house_keeper/src/prover/metrics.rs deleted file mode 100644 index 7711c9c04a6b..000000000000 --- a/core/node/house_keeper/src/prover/metrics.rs +++ /dev/null @@ -1,123 +0,0 @@ -use vise::{Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, LabeledFamily, Metrics}; -use zksync_types::protocol_version::ProtocolSemanticVersion; -#[derive(Debug, Metrics)] -#[metrics(prefix = "house_keeper")] -pub(crate) struct HouseKeeperMetrics { - pub prover_job_archived: Counter, - pub gpu_prover_archived: Counter, -} - -#[vise::register] -pub(crate) static HOUSE_KEEPER_METRICS: vise::Global = vise::Global::new(); - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue)] -#[metrics(rename_all = "snake_case")] -#[allow(dead_code)] -pub enum JobStatus { - Queued, - InProgress, - Successful, - Failed, - SentToServer, - Skipped, -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "prover_fri")] -pub(crate) struct ProverFriMetrics { - pub proof_compressor_requeued_jobs: Counter, - #[metrics(labels = ["type", "protocol_version"])] - pub proof_compressor_jobs: LabeledFamily<(JobStatus, String), Gauge, 2>, - pub proof_compressor_oldest_uncompressed_batch: Gauge, -} - -#[vise::register] -pub(crate) static PROVER_FRI_METRICS: vise::Global = vise::Global::new(); - -#[derive(Debug, Clone, PartialEq, Eq, Hash, EncodeLabelSet)] -pub(crate) struct ProverJobsLabels { - pub r#type: &'static str, - pub circuit_id: String, - pub aggregation_round: String, - pub prover_group_id: String, - pub protocol_version: String, -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "fri_prover")] -pub(crate) struct FriProverMetrics { - pub prover_jobs: Family>, - #[metrics(labels = ["circuit_id", "aggregation_round"])] - pub block_number: LabeledFamily<(String, String), Gauge, 2>, - pub oldest_unpicked_batch: Gauge, - pub oldest_not_generated_batch: Gauge, - #[metrics(labels = ["round"])] - pub oldest_unprocessed_block_by_round: LabeledFamily>, -} - -impl FriProverMetrics { - pub fn report_prover_jobs( - &self, - r#type: &'static str, - circuit_id: u8, - 
aggregation_round: u8, - prover_group_id: u8, - protocol_version: ProtocolSemanticVersion, - amount: u64, - ) { - self.prover_jobs[&ProverJobsLabels { - r#type, - circuit_id: circuit_id.to_string(), - aggregation_round: aggregation_round.to_string(), - prover_group_id: prover_group_id.to_string(), - protocol_version: protocol_version.to_string(), - }] - .set(amount); - } -} - -#[vise::register] -pub(crate) static FRI_PROVER_METRICS: vise::Global = vise::Global::new(); - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "type", rename_all = "snake_case")] -#[allow(clippy::enum_variant_names)] -pub(crate) enum WitnessType { - WitnessInputsFri, - LeafAggregationJobsFri, - NodeAggregationJobsFri, - RecursionTipJobsFri, - SchedulerJobsFri, -} - -impl From<&str> for WitnessType { - fn from(s: &str) -> Self { - match s { - "witness_inputs_fri" => Self::WitnessInputsFri, - "leaf_aggregations_jobs_fri" => Self::LeafAggregationJobsFri, - "node_aggregations_jobs_fri" => Self::NodeAggregationJobsFri, - "recursion_tip_jobs_fri" => Self::RecursionTipJobsFri, - "scheduler_jobs_fri" => Self::SchedulerJobsFri, - _ => panic!("Invalid witness type"), - } - } -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "server")] -pub(crate) struct ServerMetrics { - pub prover_fri_requeued_jobs: Counter, - pub requeued_jobs: Family>, - #[metrics(labels = ["type", "round", "protocol_version"])] - pub witness_generator_jobs_by_round: - LabeledFamily<(&'static str, String, String), Gauge, 3>, - #[metrics(labels = ["type", "protocol_version"])] - pub witness_generator_jobs: LabeledFamily<(&'static str, String), Gauge, 2>, - pub leaf_fri_witness_generator_waiting_to_queued_jobs_transitions: Counter, - pub node_fri_witness_generator_waiting_to_queued_jobs_transitions: Counter, - pub recursion_tip_witness_generator_waiting_to_queued_jobs_transitions: Counter, - pub scheduler_witness_generator_waiting_to_queued_jobs_transitions: Counter, -} - -#[vise::register] -pub(crate) static SERVER_METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/house_keeper/src/prover/mod.rs b/core/node/house_keeper/src/prover/mod.rs deleted file mode 100644 index af315c53cb48..000000000000 --- a/core/node/house_keeper/src/prover/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -mod archiver; -mod metrics; -mod queue_reporter; -mod retry_manager; -mod waiting_to_queued_fri_witness_job_mover; - -pub use archiver::{FriGpuProverArchiver, FriProverJobsArchiver}; -pub use queue_reporter::{ - FriProofCompressorQueueReporter, FriProverQueueReporter, FriWitnessGeneratorQueueReporter, -}; -pub use retry_manager::{ - FriProofCompressorJobRetryManager, FriProverJobRetryManager, FriWitnessGeneratorJobRetryManager, -}; -pub use waiting_to_queued_fri_witness_job_mover::WaitingToQueuedFriWitnessJobMover; diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs deleted file mode 100644 index c554bf4616d3..000000000000 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs +++ /dev/null @@ -1,88 +0,0 @@ -use std::collections::HashMap; - -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; -use zksync_types::{protocol_version::ProtocolSemanticVersion, prover_dal::JobCountStatistics}; - -use crate::{ - periodic_job::PeriodicJob, - prover::metrics::{JobStatus, PROVER_FRI_METRICS}, -}; 
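
The deleted `ProverJobsLabels`/`report_prover_jobs` pair above is `vise`'s labeled-family idiom: indexing a `Family` by a label set lazily creates one time series per label combination. A self-contained sketch of the same mechanism (struct and function names here are illustrative):

```rust
use vise::{EncodeLabelSet, Family, Gauge, Metrics};

// Each distinct label combination becomes its own exported time series.
#[derive(Debug, Clone, PartialEq, Eq, Hash, EncodeLabelSet)]
struct JobLabels {
    circuit_id: String,
    aggregation_round: String,
}

#[derive(Debug, Metrics)]
#[metrics(prefix = "example")]
struct ExampleMetrics {
    prover_jobs: Family<JobLabels, Gauge<u64>>,
}

#[vise::register]
static EXAMPLE_METRICS: vise::Global<ExampleMetrics> = vise::Global::new();

fn report_prover_jobs(circuit_id: u8, aggregation_round: u8, amount: u64) {
    // Indexing by a label set creates the labeled gauge on first use.
    EXAMPLE_METRICS.prover_jobs[&JobLabels {
        circuit_id: circuit_id.to_string(),
        aggregation_round: aggregation_round.to_string(),
    }]
    .set(amount);
}
```

The queue reporters whose removal follows used exactly this mechanism.
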
- -/// `FriProofCompressorQueueReporter` is a task that periodically reports compression jobs status. -/// Note: these values will be used for auto-scaling proof compressor -#[derive(Debug)] -pub struct FriProofCompressorQueueReporter { - reporting_interval_ms: u64, - pool: ConnectionPool<Prover>, -} - -impl FriProofCompressorQueueReporter { - pub fn new(reporting_interval_ms: u64, pool: ConnectionPool<Prover>) -> Self { - Self { - reporting_interval_ms, - pool, - } - } - - async fn get_job_statistics( - pool: &ConnectionPool<Prover>, - ) -> HashMap<ProtocolSemanticVersion, JobCountStatistics> { - pool.connection() - .await - .unwrap() - .fri_proof_compressor_dal() - .get_jobs_stats() - .await - } -} - -#[async_trait] -impl PeriodicJob for FriProofCompressorQueueReporter { - const SERVICE_NAME: &'static str = "FriProofCompressorQueueReporter"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let stats = Self::get_job_statistics(&self.pool).await; - - for (protocol_version, stats) in &stats { - if stats.queued > 0 { - tracing::info!( - "Found {} free {} in progress proof compressor jobs for protocol version {}", - stats.queued, - stats.in_progress, - protocol_version - ); - } - - PROVER_FRI_METRICS.proof_compressor_jobs - [&(JobStatus::Queued, protocol_version.to_string())] - .set(stats.queued as u64); - - PROVER_FRI_METRICS.proof_compressor_jobs - [&(JobStatus::InProgress, protocol_version.to_string())] - .set(stats.in_progress as u64); - } - - let oldest_not_compressed_batch = self - .pool - .connection() - .await - .unwrap() - .fri_proof_compressor_dal() - .get_oldest_not_compressed_batch() - .await; - - if let Some(l1_batch_number) = oldest_not_compressed_batch { - PROVER_FRI_METRICS - .proof_compressor_oldest_uncompressed_batch - .set(l1_batch_number.0 as u64); - } - - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.reporting_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs deleted file mode 100644 index 12dfae86ab46..000000000000 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs +++ /dev/null @@ -1,144 +0,0 @@ -use async_trait::async_trait; -use zksync_config::configs::fri_prover_group::FriProverGroupConfig; -use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_prover_dal::{Prover, ProverDal}; -use zksync_types::{basic_fri_types::CircuitIdRoundTuple, prover_dal::JobCountStatistics}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::FRI_PROVER_METRICS}; -/// `FriProverQueueReporter` is a task that periodically reports prover jobs status. -/// Note: these values will be used for auto-scaling provers and Witness Vector Generators.
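
This reporter and the `FriProverQueueReporter` that follows both implement the crate's `PeriodicJob` trait. Its surface, reconstructed here from the usages in this diff (the real definition lives in `zksync_house_keeper::periodic_job` and also wires in a stop signal), looks roughly like:

```rust
// Reconstructed sketch; may not match the actual trait definition exactly.
#[async_trait::async_trait]
trait PeriodicJob: Send {
    const SERVICE_NAME: &'static str;

    /// Performs one unit of work per tick.
    async fn run_routine_task(&mut self) -> anyhow::Result<()>;

    fn polling_interval_ms(&self) -> u64;
}

// A plausible driver loop (simplified: no graceful-shutdown handling).
async fn drive<J: PeriodicJob>(mut job: J) -> anyhow::Result<()> {
    loop {
        tracing::debug!("running routine task of {}", J::SERVICE_NAME);
        job.run_routine_task().await?;
        tokio::time::sleep(std::time::Duration::from_millis(job.polling_interval_ms())).await;
    }
}
```
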
-#[derive(Debug)] -pub struct FriProverQueueReporter { - reporting_interval_ms: u64, - prover_connection_pool: ConnectionPool, - db_connection_pool: ConnectionPool, - config: FriProverGroupConfig, -} - -impl FriProverQueueReporter { - pub fn new( - reporting_interval_ms: u64, - prover_connection_pool: ConnectionPool, - db_connection_pool: ConnectionPool, - config: FriProverGroupConfig, - ) -> Self { - Self { - reporting_interval_ms, - prover_connection_pool, - db_connection_pool, - config, - } - } -} - -#[async_trait] -impl PeriodicJob for FriProverQueueReporter { - const SERVICE_NAME: &'static str = "FriProverQueueReporter"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let mut conn = self.prover_connection_pool.connection().await.unwrap(); - let stats = conn.fri_prover_jobs_dal().get_prover_jobs_stats().await; - - for (protocol_semantic_version, circuit_prover_stats) in stats { - for (tuple, stat) in circuit_prover_stats { - let CircuitIdRoundTuple { - circuit_id, - aggregation_round, - } = tuple; - let JobCountStatistics { - queued, - in_progress, - } = stat; - let group_id = self - .config - .get_group_id_for_circuit_id_and_aggregation_round( - circuit_id, - aggregation_round, - ) - .unwrap_or(u8::MAX); - - FRI_PROVER_METRICS.report_prover_jobs( - "queued", - circuit_id, - aggregation_round, - group_id, - protocol_semantic_version, - queued as u64, - ); - - FRI_PROVER_METRICS.report_prover_jobs( - "in_progress", - circuit_id, - aggregation_round, - group_id, - protocol_semantic_version, - in_progress as u64, - ); - } - } - - let lag_by_circuit_type = conn - .fri_prover_jobs_dal() - .min_unproved_l1_batch_number() - .await; - - for ((circuit_id, aggregation_round), l1_batch_number) in lag_by_circuit_type { - FRI_PROVER_METRICS.block_number - [&(circuit_id.to_string(), aggregation_round.to_string())] - .set(l1_batch_number.0 as u64); - } - - // FIXME: refactor metrics here - - let mut db_conn = self.db_connection_pool.connection().await.unwrap(); - - let oldest_unpicked_batch = match db_conn - .proof_generation_dal() - .get_oldest_unpicked_batch() - .await? - { - Some(l1_batch_number) => l1_batch_number.0 as u64, - // if there is no unpicked batch in database, we use sealed batch number as a result - None => { - db_conn - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .unwrap() - .unwrap() - .0 as u64 - } - }; - FRI_PROVER_METRICS - .oldest_unpicked_batch - .set(oldest_unpicked_batch); - - if let Some(l1_batch_number) = db_conn - .proof_generation_dal() - .get_oldest_not_generated_batch() - .await? 
- { - FRI_PROVER_METRICS - .oldest_not_generated_batch - .set(l1_batch_number.0 as u64); - } - - for aggregation_round in 0..3 { - if let Some(l1_batch_number) = conn - .fri_prover_jobs_dal() - .min_unproved_l1_batch_number_for_aggregation_round(aggregation_round.into()) - .await - { - FRI_PROVER_METRICS.oldest_unprocessed_block_by_round - [&aggregation_round.to_string()] - .set(l1_batch_number.0 as u64); - } - } - - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.reporting_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs deleted file mode 100644 index cd124dffaf67..000000000000 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; -use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, - prover_dal::JobCountStatistics, -}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; - -/// `FriWitnessGeneratorQueueReporter` is a task that periodically reports witness generator jobs status. -/// Note: these values will be used for auto-scaling witness generators (Basic, Leaf, Node, Recursion Tip and Scheduler). -#[derive(Debug)] -pub struct FriWitnessGeneratorQueueReporter { - reporting_interval_ms: u64, - pool: ConnectionPool, -} - -impl FriWitnessGeneratorQueueReporter { - pub fn new(pool: ConnectionPool, reporting_interval_ms: u64) -> Self { - Self { - reporting_interval_ms, - pool, - } - } - - async fn get_job_statistics( - &self, - ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> { - let mut conn = self.pool.connection().await.unwrap(); - let mut result = HashMap::new(); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::BasicCircuits) - .await, - ); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::LeafAggregation) - .await, - ); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::NodeAggregation) - .await, - ); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::RecursionTip) - .await, - ); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::Scheduler) - .await, - ); - - result - } -} - -fn emit_metrics_for_round( - round: AggregationRound, - protocol_version: ProtocolSemanticVersion, - stats: &JobCountStatistics, -) { - if stats.queued > 0 || stats.in_progress > 0 { - tracing::trace!( - "Found {} free and {} in progress {:?} FRI witness generators jobs for protocol version {}", - stats.queued, - stats.in_progress, - round, - protocol_version - ); - } - - SERVER_METRICS.witness_generator_jobs_by_round[&( - "queued", - format!("{:?}", round), - protocol_version.to_string(), - )] - .set(stats.queued as u64); - SERVER_METRICS.witness_generator_jobs_by_round[&( - "in_progress", - format!("{:?}", round), - protocol_version.to_string(), - )] - .set(stats.in_progress as u64); -} - -#[async_trait] -impl PeriodicJob for FriWitnessGeneratorQueueReporter { - const SERVICE_NAME: &'static str = "FriWitnessGeneratorQueueReporter"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let 
stats_for_all_rounds = self.get_job_statistics().await; - let mut aggregated = HashMap::<ProtocolSemanticVersion, JobCountStatistics>::new(); - for ((round, protocol_version), stats) in stats_for_all_rounds { - emit_metrics_for_round(round, protocol_version, &stats); - - let entry = aggregated.entry(protocol_version).or_default(); - entry.queued += stats.queued; - entry.in_progress += stats.in_progress; - } - - for (protocol_version, stats) in &aggregated { - if stats.queued > 0 || stats.in_progress > 0 { - tracing::trace!( - "Found {} free {} in progress witness generators jobs for protocol version {}", - stats.queued, - stats.in_progress, - protocol_version - ); - } - - SERVER_METRICS.witness_generator_jobs[&("queued", protocol_version.to_string())] - .set(stats.queued as u64); - - SERVER_METRICS.witness_generator_jobs[&("in_progress", protocol_version.to_string())] - .set(stats.in_progress as u64); - } - - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.reporting_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/queue_reporter/mod.rs b/core/node/house_keeper/src/prover/queue_reporter/mod.rs deleted file mode 100644 index 9eba45320988..000000000000 --- a/core/node/house_keeper/src/prover/queue_reporter/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod fri_proof_compressor_queue_reporter; -mod fri_prover_queue_reporter; -mod fri_witness_generator_queue_reporter; - -pub use fri_proof_compressor_queue_reporter::FriProofCompressorQueueReporter; -pub use fri_prover_queue_reporter::FriProverQueueReporter; -pub use fri_witness_generator_queue_reporter::FriWitnessGeneratorQueueReporter; diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs deleted file mode 100644 index 4d4d8ceed75e..000000000000 --- a/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs +++ /dev/null @@ -1,60 +0,0 @@ -use std::time::Duration; - -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::PROVER_FRI_METRICS}; - -/// `FriProofCompressorJobRetryManager` is a task that periodically queues stuck compressor jobs.
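
The per-protocol-version `aggregated` map in `FriWitnessGeneratorQueueReporter` above is a standard `HashMap::entry` fold. A runnable, standalone illustration — this local `JobCountStatistics` mirrors the `zksync_types::prover_dal` struct, and `aggregate` is a hypothetical helper:

```rust
use std::collections::HashMap;

#[derive(Debug, Default, Clone, Copy)]
struct JobCountStatistics {
    queued: usize,
    in_progress: usize,
}

fn aggregate(
    per_round: Vec<(String, JobCountStatistics)>, // (protocol version, per-round stats)
) -> HashMap<String, JobCountStatistics> {
    let mut aggregated = HashMap::<String, JobCountStatistics>::new();
    for (protocol_version, stats) in per_round {
        // `entry().or_default()` inserts a zeroed entry the first time a
        // protocol version is seen; each round then adds onto it.
        let entry = aggregated.entry(protocol_version).or_default();
        entry.queued += stats.queued;
        entry.in_progress += stats.in_progress;
    }
    aggregated
}
```

The retry managers whose removal begins here reuse the same `PeriodicJob` scaffolding to requeue stuck jobs on a timer.
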
-#[derive(Debug)] -pub struct FriProofCompressorJobRetryManager { - pool: ConnectionPool, - max_attempts: u32, - processing_timeout: Duration, - retry_interval_ms: u64, -} - -impl FriProofCompressorJobRetryManager { - pub fn new( - max_attempts: u32, - processing_timeout: Duration, - retry_interval_ms: u64, - pool: ConnectionPool, - ) -> Self { - Self { - max_attempts, - processing_timeout, - retry_interval_ms, - pool, - } - } -} - -#[async_trait] -impl PeriodicJob for FriProofCompressorJobRetryManager { - const SERVICE_NAME: &'static str = "FriProofCompressorJobRetryManager"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_proof_compressor_dal() - .requeue_stuck_jobs(self.processing_timeout, self.max_attempts) - .await; - let job_len = stuck_jobs.len(); - for stuck_job in stuck_jobs { - tracing::info!("re-queuing fri proof compressor job {:?}", stuck_job); - } - PROVER_FRI_METRICS - .proof_compressor_requeued_jobs - .inc_by(job_len as u64); - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.retry_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs deleted file mode 100644 index 755944d21634..000000000000 --- a/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs +++ /dev/null @@ -1,60 +0,0 @@ -use std::time::Duration; - -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; - -/// `FriProverJobRetryManager` is a task that periodically queues stuck prover jobs. 
-#[derive(Debug)] -pub struct FriProverJobRetryManager { - pool: ConnectionPool, - max_attempts: u32, - processing_timeout: Duration, - retry_interval_ms: u64, -} - -impl FriProverJobRetryManager { - pub fn new( - max_attempts: u32, - processing_timeout: Duration, - retry_interval_ms: u64, - pool: ConnectionPool, - ) -> Self { - Self { - max_attempts, - processing_timeout, - retry_interval_ms, - pool, - } - } -} - -#[async_trait] -impl PeriodicJob for FriProverJobRetryManager { - const SERVICE_NAME: &'static str = "FriProverJobRetryManager"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_prover_jobs_dal() - .requeue_stuck_jobs(self.processing_timeout, self.max_attempts) - .await; - let job_len = stuck_jobs.len(); - for stuck_job in stuck_jobs { - tracing::info!("re-queuing fri prover job {:?}", stuck_job); - } - SERVER_METRICS - .prover_fri_requeued_jobs - .inc_by(job_len as u64); - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.retry_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs deleted file mode 100644 index b3d990e2754f..000000000000 --- a/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs +++ /dev/null @@ -1,124 +0,0 @@ -use async_trait::async_trait; -use zksync_config::configs::fri_witness_generator::WitnessGenerationTimeouts; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; -use zksync_types::prover_dal::StuckJobs; - -use crate::{ - periodic_job::PeriodicJob, - prover::metrics::{WitnessType, SERVER_METRICS}, -}; - -/// `FriWitnessGeneratorJobRetryManager` is a task that periodically queues stuck prover jobs. 
-#[derive(Debug)] -pub struct FriWitnessGeneratorJobRetryManager { - pool: ConnectionPool, - max_attempts: u32, - processing_timeouts: WitnessGenerationTimeouts, - retry_interval_ms: u64, -} - -impl FriWitnessGeneratorJobRetryManager { - pub fn new( - max_attempts: u32, - processing_timeouts: WitnessGenerationTimeouts, - retry_interval_ms: u64, - pool: ConnectionPool, - ) -> Self { - Self { - max_attempts, - processing_timeouts, - retry_interval_ms, - pool, - } - } - - pub fn emit_telemetry(&self, witness_type: &str, stuck_jobs: &Vec) { - for stuck_job in stuck_jobs { - tracing::info!("re-queuing {:?} {:?}", witness_type, stuck_job); - } - SERVER_METRICS.requeued_jobs[&WitnessType::from(witness_type)] - .inc_by(stuck_jobs.len() as u64); - } - - pub async fn requeue_stuck_witness_inputs_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_basic_jobs(self.processing_timeouts.basic(), self.max_attempts) - .await; - self.emit_telemetry("witness_inputs_fri", &stuck_jobs); - } - - pub async fn requeue_stuck_leaf_aggregations_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_leaf_jobs(self.processing_timeouts.leaf(), self.max_attempts) - .await; - self.emit_telemetry("leaf_aggregations_jobs_fri", &stuck_jobs); - } - - pub async fn requeue_stuck_node_aggregations_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_node_jobs(self.processing_timeouts.node(), self.max_attempts) - .await; - self.emit_telemetry("node_aggregations_jobs_fri", &stuck_jobs); - } - - pub async fn requeue_stuck_recursion_tip_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_recursion_tip_jobs( - self.processing_timeouts.recursion_tip(), - self.max_attempts, - ) - .await; - self.emit_telemetry("recursion_tip_jobs_fri", &stuck_jobs); - } - - pub async fn requeue_stuck_scheduler_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_scheduler_jobs(self.processing_timeouts.scheduler(), self.max_attempts) - .await; - self.emit_telemetry("scheduler_jobs_fri", &stuck_jobs); - } -} - -#[async_trait] -impl PeriodicJob for FriWitnessGeneratorJobRetryManager { - const SERVICE_NAME: &'static str = "FriWitnessGeneratorJobRetryManager"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - self.requeue_stuck_witness_inputs_jobs().await; - self.requeue_stuck_leaf_aggregations_jobs().await; - self.requeue_stuck_node_aggregations_jobs().await; - self.requeue_stuck_recursion_tip_jobs().await; - self.requeue_stuck_scheduler_jobs().await; - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.retry_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/retry_manager/mod.rs b/core/node/house_keeper/src/prover/retry_manager/mod.rs deleted file mode 100644 index 3b4a8b584817..000000000000 --- a/core/node/house_keeper/src/prover/retry_manager/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod fri_proof_compressor_job_retry_manager; -mod fri_prover_job_retry_manager; -mod fri_witness_generator_jobs_retry_manager; - -pub use fri_proof_compressor_job_retry_manager::FriProofCompressorJobRetryManager; -pub use fri_prover_job_retry_manager::FriProverJobRetryManager; -pub use 
fri_witness_generator_jobs_retry_manager::FriWitnessGeneratorJobRetryManager; diff --git a/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs b/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs deleted file mode 100644 index d4d5edc78eb9..000000000000 --- a/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs +++ /dev/null @@ -1,127 +0,0 @@ -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; - -#[derive(Debug)] -pub struct WaitingToQueuedFriWitnessJobMover { - job_moving_interval_ms: u64, - pool: ConnectionPool, -} - -impl WaitingToQueuedFriWitnessJobMover { - pub fn new(job_mover_interval_ms: u64, pool: ConnectionPool) -> Self { - Self { - job_moving_interval_ms: job_mover_interval_ms, - pool, - } - } - - async fn move_leaf_aggregation_jobs(&mut self) { - let mut conn = self.pool.connection().await.unwrap(); - let l1_batch_numbers = conn - .fri_witness_generator_dal() - .move_leaf_aggregation_jobs_from_waiting_to_queued() - .await; - let len = l1_batch_numbers.len(); - for (l1_batch_number, circuit_id) in l1_batch_numbers { - tracing::info!( - "Marked fri leaf aggregation job for l1_batch {} and circuit_id {} as queued", - l1_batch_number, - circuit_id - ); - } - - SERVER_METRICS - .node_fri_witness_generator_waiting_to_queued_jobs_transitions - .inc_by(len as u64); - } - - async fn move_node_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec<(i64, u8, u16)> { - let mut conn = self.pool.connection().await.unwrap(); - let mut jobs = conn - .fri_witness_generator_dal() - .move_depth_zero_node_aggregation_jobs() - .await; - jobs.extend( - conn.fri_witness_generator_dal() - .move_depth_non_zero_node_aggregation_jobs() - .await, - ); - jobs - } - - async fn move_node_aggregation_jobs(&mut self) { - let l1_batch_numbers = self - .move_node_aggregation_jobs_from_waiting_to_queued() - .await; - let len = l1_batch_numbers.len(); - for (l1_batch_number, circuit_id, depth) in l1_batch_numbers { - tracing::info!( - "Marked fri node aggregation job for l1_batch {} and circuit_id {} depth {} as queued", - l1_batch_number, - circuit_id, - depth - ); - } - SERVER_METRICS - .leaf_fri_witness_generator_waiting_to_queued_jobs_transitions - .inc_by(len as u64); - } - - /// Marks recursion tip witness jobs as queued. - /// The trigger condition is all final node proving jobs for the batch have been completed. - async fn move_recursion_tip_jobs(&mut self) { - let mut conn = self.pool.connection().await.unwrap(); - let l1_batch_numbers = conn - .fri_witness_generator_dal() - .move_recursion_tip_jobs_from_waiting_to_queued() - .await; - for l1_batch_number in &l1_batch_numbers { - tracing::info!( - "Marked fri recursion tip witness job for l1_batch {} as queued", - l1_batch_number, - ); - } - SERVER_METRICS - .recursion_tip_witness_generator_waiting_to_queued_jobs_transitions - .inc_by(l1_batch_numbers.len() as u64); - } - - /// Marks scheduler witness jobs as queued. - /// The trigger condition is the recursion tip proving job for the batch has been completed. 
- async fn move_scheduler_jobs(&mut self) { - let mut conn = self.pool.connection().await.unwrap(); - let l1_batch_numbers = conn - .fri_witness_generator_dal() - .move_scheduler_jobs_from_waiting_to_queued() - .await; - for l1_batch_number in &l1_batch_numbers { - tracing::info!( - "Marked fri scheduler witness job for l1_batch {} as queued", - l1_batch_number, - ); - } - SERVER_METRICS - .scheduler_witness_generator_waiting_to_queued_jobs_transitions - .inc_by(l1_batch_numbers.len() as u64); - } -} - -#[async_trait] -impl PeriodicJob for WaitingToQueuedFriWitnessJobMover { - const SERVICE_NAME: &'static str = "WaitingToQueuedFriWitnessJobMover"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - self.move_leaf_aggregation_jobs().await; - self.move_node_aggregation_jobs().await; - self.move_recursion_tip_jobs().await; - self.move_scheduler_jobs().await; - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.job_moving_interval_ms - } -} diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 3a81a578c033..1df47e775539 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -16,10 +16,8 @@ zksync_vlog.workspace = true zksync_types.workspace = true zksync_health_check.workspace = true zksync_dal.workspace = true -zksync_prover_dal.workspace = true zksync_db_connection.workspace = true zksync_config.workspace = true -zksync_protobuf_config.workspace = true zksync_state.workspace = true zksync_object_store.workspace = true zksync_storage.workspace = true @@ -36,8 +34,10 @@ zksync_house_keeper.workspace = true zksync_node_fee_model.workspace = true zksync_eth_sender.workspace = true zksync_da_client.workspace = true +zksync_da_clients.workspace = true zksync_da_dispatcher.workspace = true zksync_block_reverter.workspace = true +zksync_vm_executor.workspace = true zksync_state_keeper.workspace = true zksync_consistency_checker.workspace = true zksync_metadata_calculator.workspace = true @@ -64,9 +64,9 @@ futures.workspace = true anyhow.workspace = true tokio = { workspace = true, features = ["rt"] } ctrlc.workspace = true +semver.workspace = true [dev-dependencies] -zksync_env_config.workspace = true assert_matches.workspace = true # For running UI tests for proc macro trybuild.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_persister.rs b/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_persister.rs index 3632613379f8..347d69e55363 100644 --- a/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_persister.rs +++ b/core/node/node_framework/src/implementations/layers/base_token/base_token_ratio_persister.rs @@ -1,9 +1,9 @@ -use zksync_base_token_adjuster::{BaseTokenRatioPersister, BaseTokenRatioPersisterL1Params}; +use zksync_base_token_adjuster::{BaseTokenL1Behaviour, BaseTokenRatioPersister, UpdateOnL1Params}; use zksync_config::{ configs::{base_token_adjuster::BaseTokenAdjusterConfig, wallets::Wallets}, ContractsConfig, }; -use zksync_contracts::chain_admin_contract; +use zksync_contracts::{chain_admin_contract, getters_facet_contract}; use zksync_eth_client::clients::PKSigningClient; use zksync_types::L1ChainId; @@ -83,38 +83,44 @@ impl WiringLayer for BaseTokenRatioPersisterLayer { .base_token_addr .expect("base token address is not set"); - let l1_params = - self.wallets_config - .token_multiplier_setter - .map(|token_multiplier_setter| { - let tms_private_key = 
token_multiplier_setter.wallet.private_key(); - let tms_address = token_multiplier_setter.wallet.address(); - let EthInterfaceResource(query_client) = input.eth_client; + let l1_behaviour = self + .wallets_config + .token_multiplier_setter + .map(|token_multiplier_setter| { + let tms_private_key = token_multiplier_setter.wallet.private_key(); + let tms_address = token_multiplier_setter.wallet.address(); + let EthInterfaceResource(query_client) = input.eth_client; - let signing_client = PKSigningClient::new_raw( - tms_private_key.clone(), - self.contracts_config.diamond_proxy_addr, - self.config.default_priority_fee_per_gas, - #[allow(clippy::useless_conversion)] - self.l1_chain_id.into(), - query_client.clone().for_component("base_token_adjuster"), - ); - BaseTokenRatioPersisterL1Params { + let signing_client = PKSigningClient::new_raw( + tms_private_key.clone(), + self.contracts_config.diamond_proxy_addr, + self.config.default_priority_fee_per_gas, + #[allow(clippy::useless_conversion)] + self.l1_chain_id.into(), + query_client.clone().for_component("base_token_adjuster"), + ); + BaseTokenL1Behaviour::UpdateOnL1 { + params: UpdateOnL1Params { eth_client: Box::new(signing_client), gas_adjuster: input.tx_params.0, token_multiplier_setter_account_address: tms_address, chain_admin_contract: chain_admin_contract(), + getters_facet_contract: getters_facet_contract(), diamond_proxy_contract_address: self.contracts_config.diamond_proxy_addr, chain_admin_contract_address: self.contracts_config.chain_admin_addr, - } - }); + config: self.config.clone(), + }, + last_persisted_l1_ratio: None, + } + }) + .unwrap_or(BaseTokenL1Behaviour::NoOp); let persister = BaseTokenRatioPersister::new( master_pool, self.config, base_token_addr, price_api_client.0, - l1_params, + l1_behaviour, ); Ok(Output { persister }) diff --git a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs index 14365384c1a4..5acdab568e74 100644 --- a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs +++ b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs @@ -23,6 +23,7 @@ use crate::{ /// Wiring layer for external node consensus component. 
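
The consensus layer below now carries a `build_version`. This diff does not show where the value comes from; one plausible source (an assumption, not confirmed by this PR) is the crate version baked in at compile time:

```rust
// Assumption: not shown in this diff. `CARGO_PKG_VERSION` always parses as
// valid semver, so the `expect` cannot trigger for a well-formed crate.
fn node_build_version() -> semver::Version {
    semver::Version::parse(env!("CARGO_PKG_VERSION")).expect("crate version is valid semver")
}
```
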
#[derive(Debug)] pub struct ExternalNodeConsensusLayer { + pub build_version: semver::Version, pub config: Option<ConsensusConfig>, pub secrets: Option<ConsensusSecrets>, } @@ -78,6 +79,7 @@ impl WiringLayer for ExternalNodeConsensusLayer { }; let consensus_task = ExternalNodeTask { + build_version: self.build_version, config, pool, main_node_client, @@ -90,6 +92,7 @@ #[derive(Debug)] pub struct ExternalNodeTask { + build_version: semver::Version, config: Option<(ConsensusConfig, ConsensusSecrets)>, pool: ConnectionPool<Core>, main_node_client: Box<DynClient<L2>>, @@ -118,6 +121,7 @@ impl Task for ExternalNodeTask { self.sync_state, self.main_node_client, self.action_queue_sender, + self.build_version, )); // `run_external_node` might return an error or panic, // in which case we need to return immediately, diff --git a/core/node/node_framework/src/implementations/layers/da_clients/avail.rs b/core/node/node_framework/src/implementations/layers/da_clients/avail.rs new file mode 100644 index 000000000000..7c3d82b6d25b --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_clients/avail.rs @@ -0,0 +1,45 @@ +use zksync_config::AvailConfig; +use zksync_da_client::DataAvailabilityClient; +use zksync_da_clients::avail::AvailClient; + +use crate::{ + implementations::resources::da_client::DAClientResource, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +#[derive(Debug)] +pub struct AvailWiringLayer { + config: AvailConfig, +} + +impl AvailWiringLayer { + pub fn new(config: AvailConfig) -> Self { + Self { config } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub client: DAClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for AvailWiringLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "avail_client_layer" + } + + async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> { + let client: Box<dyn DataAvailabilityClient> = + Box::new(AvailClient::new(self.config).await?); + + Ok(Self::Output { + client: DAClientResource(client), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/da_clients/mod.rs b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs new file mode 100644 index 000000000000..48311ce4c3f2 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs @@ -0,0 +1,3 @@ +pub mod avail; +pub mod no_da; +pub mod object_store; diff --git a/core/lib/default_da_clients/src/no_da/wiring_layer.rs b/core/node/node_framework/src/implementations/layers/da_clients/no_da.rs similarity index 90% rename from core/lib/default_da_clients/src/no_da/wiring_layer.rs rename to core/node/node_framework/src/implementations/layers/da_clients/no_da.rs index 71a2ee7ce582..5a81ce9b3400 100644 --- a/core/lib/default_da_clients/src/no_da/wiring_layer.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/no_da.rs @@ -1,18 +1,19 @@ use std::fmt::Debug; use zksync_da_client::DataAvailabilityClient; -use zksync_node_framework::{ +use zksync_da_clients::no_da::NoDAClient; + +use crate::{ implementations::resources::da_client::DAClientResource, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; -use crate::no_da::client::NoDAClient; - #[derive(Debug, Default)] pub struct NoDAClientWiringLayer; #[derive(Debug, IntoContext)] +#[context(crate = crate)] pub struct Output { pub client: DAClientResource, } diff --git a/core/lib/default_da_clients/src/object_store/wiring_layer.rs
b/core/node/node_framework/src/implementations/layers/da_clients/object_store.rs similarity index 91% rename from core/lib/default_da_clients/src/object_store/wiring_layer.rs rename to core/node/node_framework/src/implementations/layers/da_clients/object_store.rs index 6fc84fb707b7..3fb720696da5 100644 --- a/core/lib/default_da_clients/src/object_store/wiring_layer.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/object_store.rs @@ -1,13 +1,13 @@ use zksync_config::ObjectStoreConfig; use zksync_da_client::DataAvailabilityClient; -use zksync_node_framework::{ +use zksync_da_clients::object_store::ObjectStoreDAClient; + +use crate::{ implementations::resources::da_client::DAClientResource, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; -use crate::object_store::client::ObjectStoreDAClient; - #[derive(Debug)] pub struct ObjectStorageClientWiringLayer { config: ObjectStoreConfig, @@ -20,6 +20,7 @@ impl ObjectStorageClientWiringLayer { } #[derive(Debug, IntoContext)] +#[context(crate = crate)] pub struct Output { pub client: DAClientResource, } diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs index cfe701326bd6..310580aeb3a3 100644 --- a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs +++ b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs @@ -8,10 +8,7 @@ use zksync_types::{commitment::L1BatchCommitmentMode, settlement::SettlementMode use crate::{ implementations::resources::{ circuit_breakers::CircuitBreakersResource, - eth_interface::{ - BoundEthInterfaceForBlobsResource, BoundEthInterfaceForL2Resource, - BoundEthInterfaceResource, - }, + eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, object_store::ObjectStoreResource, pools::{MasterPool, PoolResource, ReplicaPool}, }, @@ -55,7 +52,6 @@ pub struct Input { pub replica_pool: PoolResource, pub eth_client: Option, pub eth_client_blobs: Option, - pub eth_client_l2: Option, pub object_store: ObjectStoreResource, #[context(default)] pub circuit_breakers: CircuitBreakersResource, @@ -100,11 +96,6 @@ impl WiringLayer for EthTxAggregatorLayer { let master_pool = input.master_pool.get().await.unwrap(); let replica_pool = input.replica_pool.get().await.unwrap(); - let eth_client = if self.settlement_mode.is_gateway() { - input.eth_client_l2.context("l2_client must be provided")?.0 - } else { - input.eth_client.context("l1_client must be provided")?.0 - }; let eth_client_blobs = input.eth_client_blobs.map(|c| c.0); let object_store = input.object_store.0; @@ -125,7 +116,7 @@ impl WiringLayer for EthTxAggregatorLayer { master_pool.clone(), config.clone(), aggregator, - eth_client.clone(), + input.eth_client.unwrap().0, self.contracts_config.validator_timelock_addr, self.contracts_config.l1_multicall3_addr, self.contracts_config.diamond_proxy_addr, diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs index d6989d8db72b..5462fa575f94 100644 --- a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs +++ b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs @@ -6,10 +6,7 @@ use zksync_eth_sender::EthTxManager; use crate::{ implementations::resources::{ circuit_breakers::CircuitBreakersResource, - eth_interface::{ - BoundEthInterfaceForBlobsResource, 
BoundEthInterfaceForL2Resource, - BoundEthInterfaceResource, - }, + eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource}, gas_adjuster::GasAdjusterResource, pools::{MasterPool, PoolResource, ReplicaPool}, }, @@ -48,7 +45,6 @@ pub struct Input { pub replica_pool: PoolResource, pub eth_client: BoundEthInterfaceResource, pub eth_client_blobs: Option, - pub l2_client: Option, pub gas_adjuster: GasAdjusterResource, #[context(default)] pub circuit_breakers: CircuitBreakersResource, @@ -81,9 +77,10 @@ impl WiringLayer for EthTxManagerLayer { let master_pool = input.master_pool.get().await.unwrap(); let replica_pool = input.replica_pool.get().await.unwrap(); - let eth_client = input.eth_client.0; + let settlement_mode = self.eth_sender_config.gas_adjuster.unwrap().settlement_mode; + let eth_client = input.eth_client.0.clone(); let eth_client_blobs = input.eth_client_blobs.map(|c| c.0); - let l2_client = input.l2_client.map(|c| c.0); + let l2_client = input.eth_client.0; let config = self.eth_sender_config.sender.context("sender")?; @@ -93,9 +90,21 @@ impl WiringLayer for EthTxManagerLayer { master_pool, config, gas_adjuster, - Some(eth_client), - eth_client_blobs, - l2_client, + if !settlement_mode.is_gateway() { + Some(eth_client) + } else { + None + }, + if !settlement_mode.is_gateway() { + eth_client_blobs + } else { + None + }, + if settlement_mode.is_gateway() { + Some(l2_client) + } else { + None + }, ); // Insert circuit breaker. diff --git a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs index 9678c0a97932..46ed562cad90 100644 --- a/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs +++ b/core/node/node_framework/src/implementations/layers/external_proof_integration_api.rs @@ -1,8 +1,5 @@ -use std::sync::Arc; - use zksync_config::configs::external_proof_integration_api::ExternalProofIntegrationApiConfig; -use zksync_dal::{ConnectionPool, Core}; -use zksync_object_store::ObjectStore; +use zksync_external_proof_integration_api::{Api, Processor}; use zksync_types::commitment::L1BatchCommitmentMode; use crate::{ @@ -34,7 +31,7 @@ pub struct Input { #[context(crate = crate)] pub struct Output { #[context(task)] - pub task: ExternalProofIntegrationApiTask, + pub task: Api, } impl ExternalProofIntegrationApiLayer { @@ -62,39 +59,23 @@ impl WiringLayer for ExternalProofIntegrationApiLayer { let replica_pool = input.replica_pool.get().await.unwrap(); let blob_store = input.object_store.0; - let task = ExternalProofIntegrationApiTask { - external_proof_integration_api_config: self.external_proof_integration_api_config, - blob_store, - replica_pool, - commitment_mode: self.commitment_mode, - }; + let processor = Processor::new(blob_store, replica_pool, self.commitment_mode); + let task = Api::new( + processor, + self.external_proof_integration_api_config.http_port, + ); Ok(Output { task }) } } -#[derive(Debug)] -pub struct ExternalProofIntegrationApiTask { - external_proof_integration_api_config: ExternalProofIntegrationApiConfig, - blob_store: Arc, - replica_pool: ConnectionPool, - commitment_mode: L1BatchCommitmentMode, -} - #[async_trait::async_trait] -impl Task for ExternalProofIntegrationApiTask { +impl Task for Api { fn id(&self) -> TaskId { "external_proof_integration_api".into() } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - zksync_external_proof_integration_api::run_server( 
- self.external_proof_integration_api_config, - self.blob_store, - self.replica_pool, - self.commitment_mode, - stop_receiver.0, - ) - .await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs index 74314320d815..1e2bc568d50f 100644 --- a/core/node/node_framework/src/implementations/layers/house_keeper.rs +++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs @@ -1,20 +1,10 @@ -use zksync_config::configs::{ - fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - FriProofCompressorConfig, FriProverConfig, FriWitnessGeneratorConfig, -}; +use zksync_config::configs::house_keeper::HouseKeeperConfig; use zksync_house_keeper::{ - blocks_state_reporter::L1BatchMetricsReporter, - periodic_job::PeriodicJob, - prover::{ - FriGpuProverArchiver, FriProofCompressorJobRetryManager, FriProofCompressorQueueReporter, - FriProverJobRetryManager, FriProverJobsArchiver, FriProverQueueReporter, - FriWitnessGeneratorJobRetryManager, FriWitnessGeneratorQueueReporter, - WaitingToQueuedFriWitnessJobMover, - }, + blocks_state_reporter::L1BatchMetricsReporter, periodic_job::PeriodicJob, }; use crate::{ - implementations::resources::pools::{PoolResource, ProverPool, ReplicaPool}, + implementations::resources::pools::{PoolResource, ReplicaPool}, service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, @@ -26,17 +16,12 @@ #[derive(Debug)] pub struct HouseKeeperLayer { house_keeper_config: HouseKeeperConfig, - fri_prover_config: FriProverConfig, - fri_witness_generator_config: FriWitnessGeneratorConfig, - fri_prover_group_config: FriProverGroupConfig, - fri_proof_compressor_config: FriProofCompressorConfig, } #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { pub replica_pool: PoolResource<ReplicaPool>, - pub prover_pool: PoolResource<ProverPool>, } #[derive(Debug, IntoContext)] @@ -44,40 +29,12 @@ pub struct Output { #[context(task)] pub l1_batch_metrics_reporter: L1BatchMetricsReporter, - #[context(task)] - pub fri_prover_job_retry_manager: FriProverJobRetryManager, - #[context(task)] - pub fri_witness_generator_job_retry_manager: FriWitnessGeneratorJobRetryManager, - #[context(task)] - pub waiting_to_queued_fri_witness_job_mover: WaitingToQueuedFriWitnessJobMover, - #[context(task)] - pub fri_prover_job_archiver: Option<FriProverJobsArchiver>, - #[context(task)] - pub fri_prover_gpu_archiver: Option<FriGpuProverArchiver>, - #[context(task)] - pub fri_witness_generator_stats_reporter: FriWitnessGeneratorQueueReporter, - #[context(task)] - pub fri_prover_stats_reporter: FriProverQueueReporter, - #[context(task)] - pub fri_proof_compressor_stats_reporter: FriProofCompressorQueueReporter, - #[context(task)] - pub fri_proof_compressor_job_retry_manager: FriProofCompressorJobRetryManager, } impl HouseKeeperLayer { - pub fn new( - house_keeper_config: HouseKeeperConfig, - fri_prover_config: FriProverConfig, - fri_witness_generator_config: FriWitnessGeneratorConfig, - fri_prover_group_config: FriProverGroupConfig, - fri_proof_compressor_config: FriProofCompressorConfig, - ) -> Self { + pub fn new(house_keeper_config: HouseKeeperConfig) -> Self { Self { house_keeper_config, - fri_prover_config, - fri_witness_generator_config, - fri_prover_group_config, - fri_proof_compressor_config, } } } @@ -94,7 +51,6 @@ impl WiringLayer for HouseKeeperLayer { async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> { // Initialize resources let
replica_pool = input.replica_pool.get().await?; - let prover_pool = input.prover_pool.get().await?; // Initialize and add tasks let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( @@ -103,78 +59,8 @@ impl WiringLayer for HouseKeeperLayer { replica_pool.clone(), ); - let fri_prover_job_retry_manager = FriProverJobRetryManager::new( - self.fri_prover_config.max_attempts, - self.fri_prover_config.proof_generation_timeout(), - self.house_keeper_config.prover_job_retrying_interval_ms, - prover_pool.clone(), - ); - - let fri_witness_gen_job_retry_manager = FriWitnessGeneratorJobRetryManager::new( - self.fri_witness_generator_config.max_attempts, - self.fri_witness_generator_config - .witness_generation_timeouts(), - self.house_keeper_config - .witness_generator_job_retrying_interval_ms, - prover_pool.clone(), - ); - - let waiting_to_queued_fri_witness_job_mover = WaitingToQueuedFriWitnessJobMover::new( - self.house_keeper_config.witness_job_moving_interval_ms, - prover_pool.clone(), - ); - - let fri_prover_job_archiver = self.house_keeper_config.prover_job_archiver_params().map( - |(archiving_interval, archive_after)| { - FriProverJobsArchiver::new(prover_pool.clone(), archiving_interval, archive_after) - }, - ); - - let fri_prover_gpu_archiver = self - .house_keeper_config - .fri_gpu_prover_archiver_params() - .map(|(archiving_interval, archive_after)| { - FriGpuProverArchiver::new(prover_pool.clone(), archiving_interval, archive_after) - }); - - let fri_witness_generator_stats_reporter = FriWitnessGeneratorQueueReporter::new( - prover_pool.clone(), - self.house_keeper_config - .witness_generator_stats_reporting_interval_ms, - ); - - let fri_prover_stats_reporter = FriProverQueueReporter::new( - self.house_keeper_config.prover_stats_reporting_interval_ms, - prover_pool.clone(), - replica_pool.clone(), - self.fri_prover_group_config, - ); - - let fri_proof_compressor_stats_reporter = FriProofCompressorQueueReporter::new( - self.house_keeper_config - .proof_compressor_stats_reporting_interval_ms, - prover_pool.clone(), - ); - - let fri_proof_compressor_retry_manager = FriProofCompressorJobRetryManager::new( - self.fri_proof_compressor_config.max_attempts, - self.fri_proof_compressor_config.generation_timeout(), - self.house_keeper_config - .proof_compressor_job_retrying_interval_ms, - prover_pool.clone(), - ); - Ok(Output { l1_batch_metrics_reporter, - fri_prover_job_retry_manager, - fri_witness_generator_job_retry_manager: fri_witness_gen_job_retry_manager, - waiting_to_queued_fri_witness_job_mover, - fri_prover_job_archiver, - fri_prover_gpu_archiver, - fri_witness_generator_stats_reporter, - fri_prover_stats_reporter, - fri_proof_compressor_stats_reporter, - fri_proof_compressor_job_retry_manager: fri_proof_compressor_retry_manager, }) } } @@ -189,102 +75,3 @@ impl Task for L1BatchMetricsReporter { (*self).run(stop_receiver.0).await } } - -#[async_trait::async_trait] -impl Task for FriProverJobRetryManager { - fn id(&self) -> TaskId { - "fri_prover_job_retry_manager".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriWitnessGeneratorJobRetryManager { - fn id(&self) -> TaskId { - "fri_witness_generator_job_retry_manager".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for WaitingToQueuedFriWitnessJobMover { - fn id(&self) -> 
TaskId { - "waiting_to_queued_fri_witness_job_mover".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriWitnessGeneratorQueueReporter { - fn id(&self) -> TaskId { - "fri_witness_generator_queue_reporter".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriProverQueueReporter { - fn id(&self) -> TaskId { - "fri_prover_queue_reporter".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriProofCompressorQueueReporter { - fn id(&self) -> TaskId { - "fri_proof_compressor_queue_reporter".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriProofCompressorJobRetryManager { - fn id(&self) -> TaskId { - "fri_proof_compressor_job_retry_manager".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriProverJobsArchiver { - fn id(&self) -> TaskId { - "fri_prover_jobs_archiver".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriGpuProverArchiver { - fn id(&self) -> TaskId { - "fri_gpu_prover_archiver".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 6f3500a82cb9..75828da19023 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -6,6 +6,7 @@ pub mod commitment_generator; pub mod consensus; pub mod consistency_checker; pub mod contract_verification_api; +pub mod da_clients; pub mod da_dispatcher; pub mod eth_sender; pub mod eth_watch; diff --git a/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs b/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs index 317f0b197d83..bdd69214de9a 100644 --- a/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs +++ b/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs @@ -76,16 +76,18 @@ impl WiringLayer for ExternalNodeInitStrategyLayer { }); let snapshot_recovery = match self.snapshot_recovery_config { Some(recovery_config) => { + // Add a connection for checking whether the storage is initialized. 
let recovery_pool = input .master_pool - .get_custom(self.max_postgres_concurrency.get() as u32) + .get_custom(self.max_postgres_concurrency.get() as u32 + 1) .await?; - let recovery = Arc::new(ExternalNodeSnapshotRecovery { + let recovery: Arc = Arc::new(ExternalNodeSnapshotRecovery { client: client.clone(), pool: recovery_pool, + max_concurrency: self.max_postgres_concurrency, recovery_config, app_health, - }) as Arc; + }); Some(recovery) } None => None, diff --git a/core/node/node_framework/src/implementations/layers/pools_layer.rs b/core/node/node_framework/src/implementations/layers/pools_layer.rs index 734f6f0ccf69..e03cf40ce12d 100644 --- a/core/node/node_framework/src/implementations/layers/pools_layer.rs +++ b/core/node/node_framework/src/implementations/layers/pools_layer.rs @@ -2,7 +2,7 @@ use zksync_config::configs::{DatabaseSecrets, PostgresConfig}; use zksync_dal::{ConnectionPool, Core}; use crate::{ - implementations::resources::pools::{MasterPool, PoolResource, ProverPool, ReplicaPool}, + implementations::resources::pools::{MasterPool, PoolResource, ReplicaPool}, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; @@ -13,7 +13,6 @@ pub struct PoolsLayerBuilder { config: PostgresConfig, with_master: bool, with_replica: bool, - with_prover: bool, secrets: DatabaseSecrets, } @@ -25,7 +24,6 @@ impl PoolsLayerBuilder { config, with_master: false, with_replica: false, - with_prover: false, secrets: database_secrets, } } @@ -42,12 +40,6 @@ impl PoolsLayerBuilder { self } - /// Allows to enable the prover pool. - pub fn with_prover(mut self, with_prover: bool) -> Self { - self.with_prover = with_prover; - self - } - /// Builds the [`PoolsLayer`] with the provided configuration. pub fn build(self) -> PoolsLayer { PoolsLayer { @@ -55,7 +47,6 @@ impl PoolsLayerBuilder { secrets: self.secrets, with_master: self.with_master, with_replica: self.with_replica, - with_prover: self.with_prover, } } } @@ -67,14 +58,12 @@ impl PoolsLayerBuilder { /// /// - `PoolResource::` (if master pool is enabled) /// - `PoolResource::` (if replica pool is enabled) -/// - `PoolResource::` (if prover pool is enabled) #[derive(Debug)] pub struct PoolsLayer { config: PostgresConfig, secrets: DatabaseSecrets, with_master: bool, with_replica: bool, - with_prover: bool, } #[derive(Debug, IntoContext)] @@ -82,7 +71,6 @@ pub struct PoolsLayer { pub struct Output { pub master_pool: Option>, pub replica_pool: Option>, - pub prover_pool: Option>, } #[async_trait::async_trait] @@ -95,7 +83,7 @@ impl WiringLayer for PoolsLayer { } async fn wire(self, _input: Self::Input) -> Result { - if !self.with_master && !self.with_replica && !self.with_prover { + if !self.with_master && !self.with_replica { return Err(WiringError::Configuration( "At least one pool should be enabled".to_string(), )); @@ -137,21 +125,9 @@ impl WiringLayer for PoolsLayer { None }; - let prover_pool = if self.with_prover { - Some(PoolResource::::new( - self.secrets.prover_url()?, - self.config.max_connections()?, - None, - None, - )) - } else { - None - }; - Ok(Output { master_pool, replica_pool, - prover_pool, }) } } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs index 3288b68bdebb..f369db2bbf01 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs @@ -1,5 +1,5 @@ 
-use zksync_state_keeper::MainBatchExecutor; use zksync_types::vm::FastVmMode; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use crate::{ implementations::resources::state_keeper::BatchExecutorResource, @@ -39,8 +39,10 @@ impl WiringLayer for MainBatchExecutorLayer { } async fn wire(self, (): Self::Input) -> Result { - let mut executor = - MainBatchExecutor::new(self.save_call_traces, self.optional_bytecode_compression); + let mut executor = MainBatchExecutorFactory::new( + self.save_call_traces, + self.optional_bytecode_compression, + ); executor.set_fast_vm_mode(self.fast_vm_mode); Ok(executor.into()) } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs index a77344f3706e..55defd095be8 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs @@ -1,20 +1,14 @@ use std::sync::Arc; use anyhow::Context; -use zksync_state::{AsyncCatchupTask, ReadStorageFactory}; +pub use zksync_state::RocksdbStorageOptions; +use zksync_state::{AsyncCatchupTask, OwnedStorage, ReadStorageFactory}; use zksync_state_keeper::{ - seal_criteria::ConditionalSealer, AsyncRocksdbCache, BatchExecutor, OutputHandler, - StateKeeperIO, ZkSyncStateKeeper, + seal_criteria::ConditionalSealer, AsyncRocksdbCache, OutputHandler, StateKeeperIO, + ZkSyncStateKeeper, }; use zksync_storage::RocksDB; - -pub mod external_io; -pub mod main_batch_executor; -pub mod mempool_io; -pub mod output_handler; - -// Public re-export to not require the user to directly depend on `zksync_state`. -pub use zksync_state::RocksdbStorageOptions; +use zksync_vm_executor::interface::BatchExecutorFactory; use crate::{ implementations::resources::{ @@ -30,6 +24,11 @@ use crate::{ FromContext, IntoContext, }; +pub mod external_io; +pub mod main_batch_executor; +pub mod mempool_io; +pub mod output_handler; + /// Wiring layer for the state keeper. 
#[derive(Debug)] pub struct StateKeeperLayer { @@ -102,7 +101,7 @@ impl WiringLayer for StateKeeperLayer { let state_keeper = StateKeeperTask { io, - batch_executor: batch_executor_base, + executor_factory: batch_executor_base, output_handler, sealer, storage_factory: Arc::new(storage_factory), @@ -125,7 +124,7 @@ impl WiringLayer for StateKeeperLayer { #[derive(Debug)] pub struct StateKeeperTask { io: Box, - batch_executor: Box, + executor_factory: Box>, output_handler: OutputHandler, sealer: Arc, storage_factory: Arc, @@ -141,7 +140,7 @@ impl Task for StateKeeperTask { let state_keeper = ZkSyncStateKeeper::new( stop_receiver.0, self.io, - self.batch_executor, + self.executor_factory, self.output_handler, self.sealer, self.storage_factory, diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs index ee2fb84416e1..858692d3c854 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs @@ -1,6 +1,6 @@ use zksync_config::configs::vm_runner::BasicWitnessInputProducerConfig; -use zksync_state_keeper::MainBatchExecutor; use zksync_types::L2ChainId; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use zksync_vm_runner::{ impls::{BasicWitnessInputProducer, BasicWitnessInputProducerIo}, ConcurrentOutputHandlerFactoryTask, StorageSyncTask, @@ -76,12 +76,12 @@ impl WiringLayer for BasicWitnessInputProducerLayer { let connection_pool = master_pool.get_custom(self.config.window_size + 2).await?; // We don't get the executor from the context because it would contain state keeper-specific settings. - let batch_executor = Box::new(MainBatchExecutor::new(false, false)); + let batch_executor = MainBatchExecutorFactory::new(false, false); let (basic_witness_input_producer, tasks) = BasicWitnessInputProducer::new( connection_pool, object_store.0, - batch_executor, + Box::new(batch_executor), self.config.db_path, self.zksync_network_id, self.config.first_processed_batch, diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs index eedde16074f5..ee1be98319b3 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs @@ -3,7 +3,10 @@ use zksync_config::configs::ExperimentalVmPlaygroundConfig; use zksync_node_framework_derive::{FromContext, IntoContext}; use zksync_types::L2ChainId; use zksync_vm_runner::{ - impls::{VmPlayground, VmPlaygroundCursorOptions, VmPlaygroundIo, VmPlaygroundLoaderTask}, + impls::{ + VmPlayground, VmPlaygroundCursorOptions, VmPlaygroundIo, VmPlaygroundLoaderTask, + VmPlaygroundStorageOptions, + }, ConcurrentOutputHandlerFactoryTask, }; @@ -45,7 +48,7 @@ pub struct Output { #[context(task)] pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, #[context(task)] - pub loader_task: VmPlaygroundLoaderTask, + pub loader_task: Option, #[context(task)] pub playground: VmPlayground, } @@ -71,7 +74,13 @@ impl WiringLayer for VmPlaygroundLayer { // to DB for querying last processed batch and last ready to be loaded batch. // - `window_size` connections for running VM instances. 
let connection_pool = replica_pool - .get_custom(2 + self.config.window_size.get()) + .build(|builder| { + builder + .set_max_size(2 + self.config.window_size.get()) + .set_statement_timeout(None); + // Unlike virtually all other replica pool uses, VM playground has some long-living operations, + // so the default statement timeout would only get in the way. + }) .await?; let cursor = VmPlaygroundCursorOptions { @@ -79,10 +88,15 @@ impl WiringLayer for VmPlaygroundLayer { window_size: self.config.window_size, reset_state: self.config.reset, }; + let storage = if let Some(path) = self.config.db_path { + VmPlaygroundStorageOptions::Rocksdb(path) + } else { + VmPlaygroundStorageOptions::Snapshots { shadow: false } + }; let (playground, tasks) = VmPlayground::new( connection_pool, self.config.fast_vm_mode, - self.config.db_path, + storage, self.zksync_network_id, cursor, ) @@ -119,6 +133,6 @@ impl Task for VmPlayground { } async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(&stop_receiver.0).await + (*self).run(stop_receiver.0).await } } diff --git a/core/node/node_framework/src/implementations/resources/pools.rs b/core/node/node_framework/src/implementations/resources/pools.rs index 8355bb1bdd62..3c4d1d4712be 100644 --- a/core/node/node_framework/src/implementations/resources/pools.rs +++ b/core/node/node_framework/src/implementations/resources/pools.rs @@ -9,7 +9,6 @@ use std::{ use tokio::sync::Mutex; use zksync_dal::{ConnectionPool, Core}; use zksync_db_connection::connection_pool::ConnectionPoolBuilder; -use zksync_prover_dal::Prover; use zksync_types::url::SensitiveUrl; use crate::resource::Resource; @@ -86,7 +85,20 @@ impl PoolResource

{ } pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool<P::DbMarker>> { - let result = self.builder().set_max_size(size).build().await; + self.build(|builder| { + builder.set_max_size(size); + }) + .await + } + + pub async fn build<F>(&self, build_fn: F) -> anyhow::Result<ConnectionPool<P::DbMarker>> + where + F: FnOnce(&mut ConnectionPoolBuilder<P::DbMarker>), + { + let mut builder = self.builder(); + build_fn(&mut builder); + let size = builder.max_size(); + let result = builder.build().await; if result.is_ok() { let old_count = self.connections_count.fetch_add(size, Ordering::Relaxed); @@ -109,10 +121,6 @@ pub struct MasterPool {} #[non_exhaustive] pub struct ReplicaPool {} -#[derive(Debug, Clone)] -#[non_exhaustive] -pub struct ProverPool {} - pub trait PoolKind: Clone + Sync + Send + 'static { type DbMarker: zksync_db_connection::connection::DbMarker; @@ -134,11 +142,3 @@ impl PoolKind for ReplicaPool { "replica" } } - -impl PoolKind for ProverPool { - type DbMarker = Prover; - - fn kind_str() -> &'static str { - "prover" - } -} diff --git a/core/node/node_framework/src/implementations/resources/state_keeper.rs b/core/node/node_framework/src/implementations/resources/state_keeper.rs index 5db570d7989b..eed0e022774d 100644 --- a/core/node/node_framework/src/implementations/resources/state_keeper.rs +++ b/core/node/node_framework/src/implementations/resources/state_keeper.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use zksync_state_keeper::{ - seal_criteria::ConditionalSealer, BatchExecutor, OutputHandler, StateKeeperIO, -}; +use zksync_state::OwnedStorage; +use zksync_state_keeper::{seal_criteria::ConditionalSealer, OutputHandler, StateKeeperIO}; +use zksync_vm_executor::interface::BatchExecutorFactory; use crate::resource::{Resource, Unique}; @@ -23,10 +23,10 @@ impl<T: StateKeeperIO> From<T> for StateKeeperIOResource { } } -/// A resource that provides [`BatchExecutor`] implementation to the service. +/// A resource that provides [`BatchExecutorFactory`] implementation to the service. /// This resource is unique, e.g. it's expected to be consumed by a single service. #[derive(Debug, Clone)] -pub struct BatchExecutorResource(pub Unique<Box<dyn BatchExecutor>>); +pub struct BatchExecutorResource(pub Unique<Box<dyn BatchExecutorFactory<OwnedStorage>>>); impl Resource for BatchExecutorResource { fn name() -> String { @@ -34,7 +34,10 @@ impl Resource for BatchExecutorResource { -impl<T: BatchExecutor> From<T> for BatchExecutorResource { +impl<T> From<T> for BatchExecutorResource +where + T: BatchExecutorFactory<OwnedStorage>, +{ fn from(executor: T) -> Self { Self(Unique::new(Box::new(executor))) } diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs index 9e3555f22c21..b6d420093541 100644 --- a/core/node/node_framework/src/service/mod.rs +++ b/core/node/node_framework/src/service/mod.rs @@ -200,7 +200,7 @@ impl ZkStackService { // Report all the errors we've met during the init.
if !errors.is_empty() { for (layer, error) in &errors { - tracing::error!("Wiring layer {layer} can't be initialized: {error}"); + tracing::error!("Wiring layer {layer} can't be initialized: {error:?}"); } return Err(ZkStackServiceError::Wiring(errors)); } @@ -302,7 +302,7 @@ impl ZkStackService { tracing::info!("Shutdown hook {name} completed"); } Ok(Err(err)) => { - tracing::error!("Shutdown hook {name} failed: {err}"); + tracing::error!("Shutdown hook {name} failed: {err:?}"); self.errors.push(TaskError::ShutdownHookFailed(name, err)); } Err(_) => { @@ -324,7 +324,7 @@ impl ZkStackService { tracing::info!("Task {task_name} finished"); } Ok(Err(err)) => { - tracing::error!("Task {task_name} failed: {err}"); + tracing::error!("Task {task_name} failed: {err:?}"); self.errors.push(TaskError::TaskFailed(task_name, err)); } Err(panic_err) => { diff --git a/core/node/node_storage_init/src/external_node/snapshot_recovery.rs b/core/node/node_storage_init/src/external_node/snapshot_recovery.rs index d9ba60a1bcbf..9bc065b939cc 100644 --- a/core/node/node_storage_init/src/external_node/snapshot_recovery.rs +++ b/core/node/node_storage_init/src/external_node/snapshot_recovery.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Instant}; +use std::{num::NonZeroUsize, sync::Arc, time::Instant}; use anyhow::Context as _; use tokio::sync::watch; @@ -17,6 +17,7 @@ use crate::{InitializeStorage, SnapshotRecoveryConfig}; pub struct ExternalNodeSnapshotRecovery { pub client: Box>, pub pool: ConnectionPool, + pub max_concurrency: NonZeroUsize, pub recovery_config: SnapshotRecoveryConfig, pub app_health: Arc, } @@ -24,8 +25,17 @@ pub struct ExternalNodeSnapshotRecovery { #[async_trait::async_trait] impl InitializeStorage for ExternalNodeSnapshotRecovery { async fn initialize_storage(&self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { - let pool = self.pool.clone(); tracing::warn!("Proceeding with snapshot recovery. This is an experimental feature; use at your own risk"); + + let pool_size = self.pool.max_size() as usize; + if pool_size < self.max_concurrency.get() + 1 { + tracing::error!( + "Connection pool has insufficient number of connections ({pool_size} vs concurrency {} + 1 connection for checks). 
\ + This will likely lead to pool starvation during recovery.", + self.max_concurrency + ); + } + let object_store_config = self.recovery_config.object_store_config.clone().context( "Snapshot object store must be presented if snapshot recovery is activated", @@ -34,10 +44,13 @@ impl InitializeStorage for ExternalNodeSnapshotRecovery { .create_store() .await?; - let config = SnapshotsApplierConfig::default(); + let config = SnapshotsApplierConfig { + max_concurrency: self.max_concurrency, + ..SnapshotsApplierConfig::default() + }; let mut snapshots_applier_task = SnapshotsApplierTask::new( config, - pool, + self.pool.clone(), Box::new(self.client.clone().for_component("snapshot_recovery")), object_store, ); @@ -80,3 +93,60 @@ impl InitializeStorage for ExternalNodeSnapshotRecovery { Ok(completed) } } + +#[cfg(test)] +mod tests { + use std::future; + + use zksync_types::{ + tokens::{TokenInfo, TokenMetadata}, + Address, L2BlockNumber, + }; + use zksync_web3_decl::client::MockClient; + + use super::*; + + #[tokio::test] + async fn recovery_does_not_starve_pool_connections() { + let pool = ConnectionPool::constrained_test_pool(5).await; + let app_health = Arc::new(AppHealthCheck::new(None, None)); + let client = MockClient::builder(L2::default()) + .method("en_syncTokens", |_number: Option| { + Ok(vec![TokenInfo { + l1_address: Address::repeat_byte(1), + l2_address: Address::repeat_byte(2), + metadata: TokenMetadata { + name: "test".to_string(), + symbol: "TEST".to_string(), + decimals: 18, + }, + }]) + }) + .build(); + let recovery = ExternalNodeSnapshotRecovery { + client: Box::new(client), + pool, + max_concurrency: NonZeroUsize::new(4).unwrap(), + recovery_config: SnapshotRecoveryConfig { + snapshot_l1_batch_override: None, + drop_storage_key_preimages: false, + object_store_config: None, + }, + app_health, + }; + + // Emulate recovery by indefinitely holding onto `max_concurrency` connections. In practice, + // the snapshot applier will release connections eventually, but it may require more time than the connection + // acquisition timeout configured for the DB pool. 
+ for _ in 0..recovery.max_concurrency.get() { + let connection = recovery.pool.connection().await.unwrap(); + tokio::spawn(async move { + future::pending::<()>().await; + drop(connection); + }); + } + + // The only token reported by the mock client isn't recovered + assert!(!recovery.is_initialized().await.unwrap()); + } +} diff --git a/core/node/node_storage_init/src/main_node/genesis.rs b/core/node/node_storage_init/src/main_node/genesis.rs index db2eef51912e..e98473840370 100644 --- a/core/node/node_storage_init/src/main_node/genesis.rs +++ b/core/node/node_storage_init/src/main_node/genesis.rs @@ -30,6 +30,12 @@ impl InitializeStorage for MainNodeGenesis { } let params = GenesisParams::load_genesis_params(self.genesis.clone())?; + zksync_node_genesis::validate_genesis_params( + ¶ms, + &self.l1_client, + self.contracts.diamond_proxy_addr, + ) + .await?; zksync_node_genesis::ensure_genesis_state(&mut storage, ¶ms).await?; if let Some(ecosystem_contracts) = &self.contracts.ecosystem_contracts { diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml index 5f1ae04c5f50..ccfc8dd8a4e9 100644 --- a/core/node/node_sync/Cargo.toml +++ b/core/node/node_sync/Cargo.toml @@ -25,7 +25,7 @@ zksync_utils.workspace = true zksync_eth_client.workspace = true zksync_concurrency.workspace = true vise.workspace = true -zksync_vm_utils.workspace = true +zksync_vm_executor.workspace = true anyhow.workspace = true async-trait.workspace = true diff --git a/core/node/node_sync/src/client.rs b/core/node/node_sync/src/client.rs index d064803eab59..ee89db10ddd1 100644 --- a/core/node/node_sync/src/client.rs +++ b/core/node/node_sync/src/client.rs @@ -42,12 +42,7 @@ pub trait MainNodeClient: 'static + Send + Sync + fmt::Debug { with_transactions: bool, ) -> EnrichedClientResult>; - async fn fetch_consensus_genesis(&self) -> EnrichedClientResult>; - async fn fetch_genesis_config(&self) -> EnrichedClientResult; - - async fn fetch_attestation_status(&self) - -> EnrichedClientResult>; } #[async_trait] @@ -133,20 +128,6 @@ impl MainNodeClient for Box> { .with_arg("with_transactions", &with_transactions) .await } - - async fn fetch_consensus_genesis(&self) -> EnrichedClientResult> { - self.consensus_genesis() - .rpc_context("consensus_genesis") - .await - } - - async fn fetch_attestation_status( - &self, - ) -> EnrichedClientResult> { - self.attestation_status() - .rpc_context("attestation_status") - .await - } } /// Main node health check. 
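Reviewer note on the `{err}` → `{err:?}` switch in `core/node/node_framework/src/service/mod.rs` above: for `anyhow::Error` the two specifiers differ materially. `Display` prints only the outermost message, while `Debug` prints the message plus the whole `Caused by:` chain, which is what you want in task/shutdown logs. A minimal standalone illustration, not part of this diff:

use anyhow::Context as _;

fn main() {
    let err = std::fs::read_to_string("/definitely/missing")
        .context("failed reading config")
        .unwrap_err();

    // Display: only the top-level message, e.g. "failed reading config".
    println!("{err}");
    // Debug: the message plus the "Caused by:" chain (and a backtrace,
    // if one was captured), e.g.:
    //   failed reading config
    //   Caused by:
    //       No such file or directory (os error 2)
    println!("{err:?}");
}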
diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 50734421341e..b7b8930c4957 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -20,7 +20,7 @@ use zksync_types::{ L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, }; use zksync_utils::bytes_to_be_words; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; use super::{ client::MainNodeClient, diff --git a/core/node/node_sync/src/testonly.rs b/core/node/node_sync/src/testonly.rs index b9e1adc995af..16027a71a251 100644 --- a/core/node/node_sync/src/testonly.rs +++ b/core/node/node_sync/src/testonly.rs @@ -71,18 +71,6 @@ impl MainNodeClient for MockMainNodeClient { Ok(Some(block)) } - async fn fetch_consensus_genesis( - &self, - ) -> EnrichedClientResult> { - unimplemented!() - } - - async fn fetch_attestation_status( - &self, - ) -> EnrichedClientResult> { - unimplemented!() - } - async fn fetch_genesis_config(&self) -> EnrichedClientResult { Ok(mock_genesis_config()) } diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index edd8306e72e0..d9a98c2bce36 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -121,15 +121,15 @@ impl StateKeeperHandles { .unwrap(); let (stop_sender, stop_receiver) = watch::channel(false); - let mut batch_executor_base = TestBatchExecutorBuilder::default(); + let mut batch_executor = TestBatchExecutorBuilder::default(); for &tx_hashes_in_l1_batch in tx_hashes { - batch_executor_base.push_successful_transactions(tx_hashes_in_l1_batch); + batch_executor.push_successful_transactions(tx_hashes_in_l1_batch); } let state_keeper = ZkSyncStateKeeper::new( stop_receiver, Box::new(io), - Box::new(batch_executor_base), + Box::new(batch_executor), output_handler, Arc::new(NoopSealer), Arc::new(MockReadStorageFactory), diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 31a0e8437ba5..82063b23fdb5 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -16,7 +16,6 @@ zksync_config.workspace = true zksync_dal.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true -zksync_tee_verifier.workspace = true zksync_types.workspace = true anyhow.workspace = true axum.workspace = true diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 88d4930e6920..6ab7e4dec436 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -94,7 +94,7 @@ async fn request_tee_proof_inputs() { }, L1BatchCommitmentMode::Rollup, ); - let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "Sgx" })).unwrap()); + let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "sgx" })).unwrap()); let response = app .oneshot( Request::builder() @@ -134,7 +134,7 @@ async fn submit_tee_proof() { "signature": [ 0, 1, 2, 3, 4 ], "pubkey": [ 5, 6, 7, 8, 9 ], "proof": [ 10, 11, 12, 13, 14 ], - "tee_type": "Sgx" + "tee_type": "sgx" }"#; let tee_proof_request = serde_json::from_str::(tee_proof_request_str).unwrap(); @@ -232,7 +232,7 @@ async fn mock_tee_batch_status( .await .expect("Failed to mark tee_verifier_input_producer_job job as successful"); - // mock SQL table with relevant information about the status of TEE proof generation ('ready_to_be_proven') + // mock SQL table with 
relevant information about the status of TEE proof generation proof_dal .insert_tee_proof_generation_job(batch_number, TeeType::Sgx) diff --git a/core/node/state_keeper/Cargo.toml b/core/node/state_keeper/Cargo.toml index 16eb657bc9b7..1810cc00de51 100644 --- a/core/node/state_keeper/Cargo.toml +++ b/core/node/state_keeper/Cargo.toml @@ -28,7 +28,7 @@ zksync_protobuf.workspace = true zksync_test_account.workspace = true zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true -zksync_vm_utils.workspace = true +zksync_vm_executor.workspace = true zksync_system_constants.workspace = true zksync_base_token_adjuster.workspace = true diff --git a/core/node/state_keeper/src/executor/mod.rs b/core/node/state_keeper/src/executor/mod.rs new file mode 100644 index 000000000000..2fa5c3b9c128 --- /dev/null +++ b/core/node/state_keeper/src/executor/mod.rs @@ -0,0 +1,60 @@ +use zksync_multivm::interface::{ + BatchTransactionExecutionResult, Call, CompressedBytecodeInfo, ExecutionResult, Halt, + VmExecutionResultAndLogs, +}; +use zksync_types::Transaction; +pub use zksync_vm_executor::batch::MainBatchExecutorFactory; + +use crate::ExecutionMetricsForCriteria; + +#[cfg(test)] +mod tests; + +/// State keeper representation of a transaction executed in the virtual machine. +/// +/// A separate type allows to be more typesafe when dealing with halted transactions. It also simplifies testing seal criteria +/// (i.e., without picking transactions that actually produce appropriate `ExecutionMetricsForCriteria`). +#[derive(Debug, Clone)] +pub enum TxExecutionResult { + /// Successful execution of the tx and the block tip dry run. + Success { + tx_result: Box, + tx_metrics: Box, + compressed_bytecodes: Vec, + call_tracer_result: Vec, + gas_remaining: u32, + }, + /// The VM rejected the tx for some reason. + RejectedByVm { reason: Halt }, + /// Bootloader gas limit is not enough to execute the tx. + BootloaderOutOfGasForTx, +} + +impl TxExecutionResult { + pub(crate) fn new(res: BatchTransactionExecutionResult, tx: &Transaction) -> Self { + match res.tx_result.result { + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas, + } => Self::BootloaderOutOfGasForTx, + ExecutionResult::Halt { reason } => Self::RejectedByVm { reason }, + _ => Self::Success { + tx_metrics: Box::new(ExecutionMetricsForCriteria::new(Some(tx), &res.tx_result)), + gas_remaining: res.tx_result.statistics.gas_remaining, + tx_result: res.tx_result, + compressed_bytecodes: res.compressed_bytecodes, + call_tracer_result: res.call_traces, + }, + } + } + + /// Returns a revert reason if either transaction was rejected or bootloader ran out of gas. + pub(super) fn err(&self) -> Option<&Halt> { + match self { + Self::Success { .. 
} => None, + Self::RejectedByVm { + reason: rejection_reason, + } => Some(rejection_reason), + Self::BootloaderOutOfGasForTx => Some(&Halt::BootloaderOutOfGas), + } + } +} diff --git a/core/node/state_keeper/src/batch_executor/tests/mod.rs b/core/node/state_keeper/src/executor/tests/mod.rs similarity index 89% rename from core/node/state_keeper/src/batch_executor/tests/mod.rs rename to core/node/state_keeper/src/executor/tests/mod.rs index ab9115991deb..6fa4522d43fd 100644 --- a/core/node/state_keeper/src/batch_executor/tests/mod.rs +++ b/core/node/state_keeper/src/executor/tests/mod.rs @@ -1,35 +1,39 @@ +// FIXME: move storage-agnostic tests to VM executor crate + use assert_matches::assert_matches; use test_casing::{test_casing, Product}; +use tester::AccountFailedCall; use zksync_dal::{ConnectionPool, Core}; +use zksync_multivm::interface::{BatchTransactionExecutionResult, ExecutionResult, Halt}; use zksync_test_account::Account; use zksync_types::{ get_nonce_key, utils::storage_key_for_eth_balance, vm::FastVmMode, PriorityOpId, }; use self::tester::{AccountLoadNextExecutable, StorageSnapshot, TestConfig, Tester}; -use super::TxExecutionResult; mod read_storage_factory; mod tester; /// Ensures that the transaction was executed successfully. -fn assert_executed(execution_result: &TxExecutionResult) { - assert_matches!(execution_result, TxExecutionResult::Success { .. }); +fn assert_executed(execution_result: &BatchTransactionExecutionResult) { + let result = &execution_result.tx_result.result; + assert_matches!( + result, + ExecutionResult::Success { .. } | ExecutionResult::Revert { .. } + ); } /// Ensures that the transaction was rejected by the VM. -fn assert_rejected(execution_result: &TxExecutionResult) { - assert_matches!(execution_result, TxExecutionResult::RejectedByVm { .. }); +fn assert_rejected(execution_result: &BatchTransactionExecutionResult) { + let result = &execution_result.tx_result.result; + assert_matches!(result, ExecutionResult::Halt { reason } if !matches!(reason, Halt::BootloaderOutOfGas)); } /// Ensures that the transaction was executed successfully but reverted by the VM. -fn assert_reverted(execution_result: &TxExecutionResult) { - assert_executed(execution_result); - if let TxExecutionResult::Success { tx_result, .. } = execution_result { - assert!(tx_result.result.is_failed()); - } else { - unreachable!(); - } +fn assert_reverted(execution_result: &BatchTransactionExecutionResult) { + let result = &execution_result.tx_result.result; + assert_matches!(result, ExecutionResult::Revert { .. }); } #[derive(Debug, Clone, Copy)] @@ -189,23 +193,11 @@ async fn rollback(vm_mode: FastVmMode) { executor.rollback_last_tx().await.unwrap(); // Execute the same transaction, it must succeed. - let res_new = executor.execute_tx(tx).await.unwrap(); + let res_new = executor.execute_tx(tx.clone()).await.unwrap(); assert_executed(&res_new); - let ( - TxExecutionResult::Success { - tx_metrics: tx_metrics_old, - .. - }, - TxExecutionResult::Success { - tx_metrics: tx_metrics_new, - .. 
- }, - ) = (res_old, res_new) - else { - unreachable!(); - }; - + let tx_metrics_old = res_old.tx_result.get_execution_metrics(Some(&tx)); + let tx_metrics_new = res_new.tx_result.get_execution_metrics(Some(&tx)); assert_eq!( tx_metrics_old, tx_metrics_new, "Execution results must be the same" @@ -309,6 +301,27 @@ async fn deploy_and_call_loadtest(vm_mode: FastVmMode) { executor.finish_batch().await.unwrap(); } +#[test_casing(3, FAST_VM_MODES)] +#[tokio::test] +async fn deploy_failedcall(vm_mode: FastVmMode) { + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; + let mut alice = Account::random(); + + let mut tester = Tester::new(connection_pool, vm_mode); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let mut executor = tester + .create_batch_executor(StorageType::AsyncRocksdbCache) + .await; + + let tx = alice.deploy_failedcall_tx(); + + let execute_tx = executor.execute_tx(tx.tx).await.unwrap(); + assert_executed(&execute_tx); + + executor.finish_batch().await.unwrap(); +} + /// Checks that a tx that is reverted by the VM still can be included into a batch. #[test_casing(3, FAST_VM_MODES)] #[tokio::test] @@ -426,7 +439,12 @@ async fn bootloader_out_of_gas_for_any_tx(vm_mode: FastVmMode) { .await; let res = executor.execute_tx(alice.execute()).await.unwrap(); - assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForTx); + assert_matches!( + res.tx_result.result, + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas + } + ); } /// Checks that we can handle the bootloader out of gas error on tip phase. @@ -447,7 +465,7 @@ async fn bootloader_tip_out_of_gas() { let res = executor.execute_tx(alice.execute()).await.unwrap(); assert_executed(&res); - let finished_batch = executor.finish_batch().await.unwrap(); + let (finished_batch, _) = executor.finish_batch().await.unwrap(); // Just a bit below the gas used for the previous batch execution should be fine to execute the tx // but not enough to execute the block tip. 
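The assertion changes in these test hunks all follow from one fact: `execute_tx` now returns the raw `BatchTransactionExecutionResult`, so callers classify outcomes by matching on `tx_result.result` instead of on `TxExecutionResult` variants. A condensed sketch of that classification, mirroring `TxExecutionResult::new` from the executor module earlier in this diff (the `Outcome` enum is hypothetical, introduced only for illustration):

use zksync_multivm::interface::{BatchTransactionExecutionResult, ExecutionResult, Halt};

#[derive(Debug)]
enum Outcome {
    /// Executed and included in the batch (user-level reverts still count as executed).
    Executed,
    /// Halted by the VM for a reason other than the bootloader running out of gas.
    Rejected(Halt),
    /// The bootloader ran out of gas; the tx may still fit into the next batch.
    BootloaderOutOfGas,
}

fn classify(res: &BatchTransactionExecutionResult) -> Outcome {
    match &res.tx_result.result {
        ExecutionResult::Success { .. } | ExecutionResult::Revert { .. } => Outcome::Executed,
        ExecutionResult::Halt { reason: Halt::BootloaderOutOfGas } => Outcome::BootloaderOutOfGas,
        ExecutionResult::Halt { reason } => Outcome::Rejected(reason.clone()),
    }
}

This is the same mapping the state keeper performs before running seal-criteria checks.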
@@ -469,7 +487,12 @@ async fn bootloader_tip_out_of_gas() { .await; let res = second_executor.execute_tx(alice.execute()).await.unwrap(); - assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForTx); + assert_matches!( + res.tx_result.result, + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas + } + ); } #[tokio::test] diff --git a/core/node/state_keeper/src/batch_executor/tests/read_storage_factory.rs b/core/node/state_keeper/src/executor/tests/read_storage_factory.rs similarity index 100% rename from core/node/state_keeper/src/batch_executor/tests/read_storage_factory.rs rename to core/node/state_keeper/src/executor/tests/read_storage_factory.rs diff --git a/core/node/state_keeper/src/batch_executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs similarity index 91% rename from core/node/state_keeper/src/batch_executor/tests/tester.rs rename to core/node/state_keeper/src/executor/tests/tester.rs index e70c8b06fe0d..8256435f2f5b 100644 --- a/core/node/state_keeper/src/batch_executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -6,16 +6,22 @@ use std::{collections::HashMap, fmt::Debug, sync::Arc}; use tempfile::TempDir; use tokio::{sync::watch, task::JoinHandle}; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; +use zksync_contracts::{ + get_loadnext_contract, load_contract, read_bytecode, + test_contracts::LoadnextContractExecutionParams, TestContract, +}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_multivm::{ - interface::{L1BatchEnv, L2BlockEnv, SystemEnv}, + interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, + L1BatchEnv, L2BlockEnv, SystemEnv, + }, utils::StorageWritesDeduplicator, vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_node_genesis::{create_genesis_l1_batch, GenesisParams}; use zksync_node_test_utils::{recover, Snapshot}; -use zksync_state::{ReadStorageFactory, RocksdbStorageOptions}; +use zksync_state::{OwnedStorage, ReadStorageFactory, RocksdbStorageOptions}; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ block::L2BlockHasher, @@ -29,14 +35,14 @@ use zksync_types::{ StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::u256_to_h256; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use super::{read_storage_factory::RocksdbStorageFactory, StorageType}; use crate::{ - batch_executor::{BatchExecutorHandle, TxExecutionResult}, testonly, testonly::BASE_SYSTEM_CONTRACTS, tests::{default_l1_batch_env, default_system_env}, - AsyncRocksdbCache, BatchExecutor, MainBatchExecutor, + AsyncRocksdbCache, }; /// Representation of configuration parameters used by the state keeper. 
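For orientation before the tester changes below: the new wiring always goes factory → per-batch executor. A minimal sketch of that flow under the APIs this diff introduces, assuming the envs, storage, and transactions are already prepared (the `run_one_batch` helper is hypothetical):

use zksync_multivm::interface::{
    executor::{BatchExecutor, BatchExecutorFactory},
    L1BatchEnv, SystemEnv,
};
use zksync_state::OwnedStorage;
use zksync_types::Transaction;
use zksync_vm_executor::batch::MainBatchExecutorFactory;

async fn run_one_batch(
    storage: OwnedStorage,
    l1_batch_env: L1BatchEnv,
    system_env: SystemEnv,
    txs: Vec<Transaction>,
) -> anyhow::Result<()> {
    // The factory is long-lived and only carries configuration
    // (here: save_call_traces = false, optional bytecode compression = false).
    let mut factory = MainBatchExecutorFactory::new(false, false);
    // Each batch gets a fresh executor, parameterized by the storage it reads from.
    let mut executor: Box<dyn BatchExecutor<OwnedStorage>> =
        factory.init_batch(storage, l1_batch_env, system_env);

    for tx in txs {
        let res = executor.execute_tx(tx).await?;
        anyhow::ensure!(!res.was_halted(), "transaction halted");
    }
    // `finish_batch` consumes the executor, returning the sealed batch and the storage view.
    let (_finished_batch, _storage_view) = executor.finish_batch().await?;
    Ok(())
}

The tester hunks below implement this same flow, just split across helper methods, with `AsyncRocksdbCache` or a RocksDB factory supplying the `OwnedStorage`.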
@@ -97,7 +103,7 @@ impl Tester { pub(super) async fn create_batch_executor( &mut self, storage_type: StorageType, - ) -> BatchExecutorHandle { + ) -> Box> { let (l1_batch_env, system_env) = self.default_batch_params(); match storage_type { StorageType::AsyncRocksdbCache => { @@ -142,8 +148,8 @@ impl Tester { storage_factory: Arc, l1_batch_env: L1BatchEnv, system_env: SystemEnv, - ) -> BatchExecutorHandle { - let mut batch_executor = MainBatchExecutor::new(self.config.save_call_traces, false); + ) -> Box> { + let mut batch_executor = MainBatchExecutorFactory::new(self.config.save_call_traces, false); batch_executor.set_fast_vm_mode(self.config.fast_vm_mode); let (_stop_sender, stop_receiver) = watch::channel(false); @@ -158,7 +164,7 @@ impl Tester { pub(super) async fn recover_batch_executor( &mut self, snapshot: &SnapshotRecoveryStatus, - ) -> BatchExecutorHandle { + ) -> Box> { let (storage_factory, task) = AsyncRocksdbCache::new( self.pool(), self.state_keeper_db_path(), @@ -175,7 +181,7 @@ impl Tester { &mut self, storage_type: &StorageType, snapshot: &SnapshotRecoveryStatus, - ) -> BatchExecutorHandle { + ) -> Box> { match storage_type { StorageType::AsyncRocksdbCache => self.recover_batch_executor(snapshot).await, StorageType::Rocksdb => { @@ -199,7 +205,7 @@ impl Tester { &self, storage_factory: Arc, snapshot: &SnapshotRecoveryStatus, - ) -> BatchExecutorHandle { + ) -> Box> { let current_timestamp = snapshot.l2_block_timestamp + 1; let (mut l1_batch_env, system_env) = self.batch_params(snapshot.l1_batch_number + 1, current_timestamp); @@ -259,9 +265,8 @@ impl Tester { /// Adds funds for specified account list. /// Expects genesis to be performed (i.e. `setup_storage` called beforehand). pub(super) async fn fund(&self, addresses: &[Address]) { - let mut storage = self.pool.connection_tagged("state_keeper").await.unwrap(); - let eth_amount = U256::from(10u32).pow(U256::from(32)); //10^32 wei + let mut storage = self.pool.connection_tagged("state_keeper").await.unwrap(); for address in addresses { let key = storage_key_for_standard_token_balance( @@ -333,6 +338,24 @@ pub trait AccountLoadNextExecutable { ) -> Transaction; } +pub trait AccountFailedCall { + fn deploy_failedcall_tx(&mut self) -> DeployContractsTx; +} + +impl AccountFailedCall for Account { + fn deploy_failedcall_tx(&mut self) -> DeployContractsTx { + let bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/failed-call/failed_call.sol/FailedCall.json"); + let failedcall_contract = TestContract { + bytecode, + contract: load_contract("etc/contracts-test-data/artifacts-zk/contracts/failed-call/failed_call.sol/FailedCall.json"), + factory_deps: vec![], + }; + + self.get_deploy_tx(&failedcall_contract.bytecode, None, TxType::L2) + } +} + impl AccountLoadNextExecutable for Account { fn deploy_loadnext_tx(&mut self) -> DeployContractsTx { let loadnext_contract = get_loadnext_contract(); @@ -485,13 +508,10 @@ impl StorageSnapshot { let tx = alice.execute(); let tx_hash = tx.hash(); // probably incorrect let res = executor.execute_tx(tx).await.unwrap(); - if let TxExecutionResult::Success { tx_result, .. 
} = res { - let storage_logs = &tx_result.logs.storage_logs; - storage_writes_deduplicator - .apply(storage_logs.iter().filter(|log| log.log.is_write())); - } else { - panic!("Unexpected tx execution result: {res:?}"); - }; + assert!(!res.was_halted()); + let tx_result = res.tx_result; + let storage_logs = &tx_result.logs.storage_logs; + storage_writes_deduplicator.apply(storage_logs.iter().filter(|log| log.log.is_write())); let mut hasher = L2BlockHasher::new( L2BlockNumber(l2_block_env.number), @@ -506,7 +526,7 @@ impl StorageSnapshot { executor.start_next_l2_block(l2_block_env).await.unwrap(); } - let finished_batch = executor.finish_batch().await.unwrap(); + let (finished_batch, _) = executor.finish_batch().await.unwrap(); let storage_logs = &finished_batch.block_tip_execution_result.logs.storage_logs; storage_writes_deduplicator.apply(storage_logs.iter().filter(|log| log.log.is_write())); let modified_entries = storage_writes_deduplicator.into_modified_key_values(); diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index 4d2907e82913..9ea699234f8f 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -19,7 +19,7 @@ use zksync_types::{ block::L2BlockHasher, protocol_version::ProtocolSemanticVersion, L2ChainId, ProtocolVersion, ProtocolVersionId, }; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; use super::*; diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index c3d8dc1dee4d..5734977538bd 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -19,7 +19,7 @@ use zksync_types::{ }; // TODO (SMA-1206): use seconds instead of milliseconds. 
use zksync_utils::time::millis_since_epoch; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; use crate::{ io::{ diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index 384b0f45b0f6..f8106fd2423b 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -7,7 +7,7 @@ use zksync_types::{ block::L2BlockExecutionData, fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2ChainId, ProtocolVersionId, Transaction, H256, }; -use zksync_vm_utils::storage::l1_batch_params; +use zksync_vm_executor::storage::l1_batch_params; pub use self::{ common::IoCursor, diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 4dfb7400ffc6..24b1ffca631c 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -352,7 +352,7 @@ mod tests { use assert_matches::assert_matches; use futures::FutureExt; use zksync_dal::CoreDal; - use zksync_multivm::interface::VmExecutionMetrics; + use zksync_multivm::interface::{FinishedL1Batch, VmExecutionMetrics}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_types::{ api::TransactionStatus, block::BlockGasCount, writes::StateDiffRecord, L1BatchNumber, @@ -363,7 +363,6 @@ mod tests { use super::*; use crate::{ io::L2BlockParams, - testonly::default_vm_batch_result, tests::{ create_execution_result, create_transaction, create_updates_manager, default_l1_batch_env, default_system_env, Query, @@ -473,7 +472,7 @@ mod tests { virtual_blocks: 1, }); - let mut batch_result = default_vm_batch_result(); + let mut batch_result = FinishedL1Batch::mock(); batch_result.final_execution_state.deduplicated_storage_logs = storage_logs.iter().map(|log| log.log).collect(); batch_result.state_diffs = Some( diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index a610194ab9ca..d36ceec7d70c 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -1,34 +1,35 @@ use std::{ convert::Infallible, - fmt, sync::Arc, time::{Duration, Instant}, }; use anyhow::Context as _; -use async_trait::async_trait; use tokio::sync::watch; use tracing::{info_span, Instrument}; use zksync_multivm::{ - interface::{Halt, L1BatchEnv, SystemEnv}, + interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, + Halt, L1BatchEnv, SystemEnv, + }, utils::StorageWritesDeduplicator, }; -use zksync_state::ReadStorageFactory; +use zksync_shared_metrics::{TxStage, APP_METRICS}; +use zksync_state::{OwnedStorage, ReadStorageFactory}; use zksync_types::{ block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, protocol_version::ProtocolVersionId, utils::display_timestamp, L1BatchNumber, Transaction, }; -use super::{ - batch_executor::{BatchExecutor, BatchExecutorHandle, TxExecutionResult}, +use crate::{ + executor::TxExecutionResult, io::{IoCursor, L1BatchParams, L2BlockParams, OutputHandler, PendingBatchData, StateKeeperIO}, metrics::{AGGREGATION_METRICS, KEEPER_METRICS, L1_BATCH_METRICS}, - seal_criteria::{ConditionalSealer, SealData, SealResolution}, + seal_criteria::{ConditionalSealer, SealData, SealResolution, UnexecutableReason}, types::ExecutionMetricsForCriteria, updates::UpdatesManager, utils::gas_count_from_writes, }; -use crate::seal_criteria::UnexecutableReason; /// Amount of time to block on waiting for some resource. 
The exact value is not really important, /// we only need it to not block on waiting indefinitely and be able to process cancellation requests. @@ -52,45 +53,6 @@ impl Error { } } -/// Functionality [`BatchExecutor`] + [`ReadStorageFactory`] with an erased storage type. This allows to keep -/// [`ZkSyncStateKeeper`] not parameterized by the storage type, simplifying its dependency injection and usage in tests. -#[async_trait] -trait ErasedBatchExecutor: fmt::Debug + Send { - async fn init_batch( - &mut self, - l1_batch_env: L1BatchEnv, - system_env: SystemEnv, - stop_receiver: &watch::Receiver, - ) -> Result; -} - -/// The only [`ErasedBatchExecutor`] implementation. -#[derive(Debug)] -struct ErasedBatchExecutorImpl { - batch_executor: Box>, - storage_factory: Arc>, -} - -#[async_trait] -impl ErasedBatchExecutor for ErasedBatchExecutorImpl { - async fn init_batch( - &mut self, - l1_batch_env: L1BatchEnv, - system_env: SystemEnv, - stop_receiver: &watch::Receiver, - ) -> Result { - let storage = self - .storage_factory - .access_storage(stop_receiver, l1_batch_env.number - 1) - .await - .context("failed creating VM storage")? - .ok_or(Error::Canceled)?; - Ok(self - .batch_executor - .init_batch(storage, l1_batch_env, system_env)) - } -} - /// State keeper represents a logic layer of L1 batch / L2 block processing flow. /// It's responsible for taking all the data from the `StateKeeperIO`, feeding it into `BatchExecutor` objects /// and calling `SealManager` to decide whether an L2 block or L1 batch should be sealed. @@ -105,28 +67,27 @@ pub struct ZkSyncStateKeeper { stop_receiver: watch::Receiver, io: Box, output_handler: OutputHandler, - batch_executor: Box, + batch_executor: Box>, sealer: Arc, + storage_factory: Arc, } impl ZkSyncStateKeeper { - pub fn new( + pub fn new( stop_receiver: watch::Receiver, sequencer: Box, - batch_executor: Box>, + batch_executor: Box>, output_handler: OutputHandler, sealer: Arc, - storage_factory: Arc>, + storage_factory: Arc, ) -> Self { Self { stop_receiver, io: sequencer, - batch_executor: Box::new(ErasedBatchExecutorImpl { - batch_executor, - storage_factory, - }), + batch_executor, output_handler, sealer, + storage_factory, } } @@ -190,21 +151,20 @@ impl ZkSyncStateKeeper { .await?; let mut batch_executor = self - .batch_executor - .init_batch( - l1_batch_env.clone(), - system_env.clone(), - &self.stop_receiver, - ) - .await?; - self.restore_state(&mut batch_executor, &mut updates_manager, pending_l2_blocks) + .create_batch_executor(l1_batch_env.clone(), system_env.clone()) .await?; + self.restore_state( + &mut *batch_executor, + &mut updates_manager, + pending_l2_blocks, + ) + .await?; let mut l1_batch_seal_delta: Option = None; while !self.is_canceled() { // This function will run until the batch can be sealed. 
self.process_l1_batch( - &mut batch_executor, + &mut *batch_executor, &mut updates_manager, protocol_upgrade_tx, ) @@ -220,12 +180,12 @@ impl ZkSyncStateKeeper { Self::start_next_l2_block( new_l2_block_params, &mut updates_manager, - &mut batch_executor, + &mut *batch_executor, ) .await?; } - let finished_batch = batch_executor.finish_batch().await?; + let (finished_batch, _) = batch_executor.finish_batch().await?; let sealed_batch_protocol_version = updates_manager.protocol_version(); updates_manager.finish_batch(finished_batch); let mut next_cursor = updates_manager.io_cursor(); @@ -244,12 +204,7 @@ impl ZkSyncStateKeeper { (system_env, l1_batch_env) = self.wait_for_new_batch_env(&next_cursor).await?; updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); batch_executor = self - .batch_executor - .init_batch( - l1_batch_env.clone(), - system_env.clone(), - &self.stop_receiver, - ) + .create_batch_executor(l1_batch_env.clone(), system_env.clone()) .await?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -262,6 +217,22 @@ impl ZkSyncStateKeeper { Err(Error::Canceled) } + async fn create_batch_executor( + &mut self, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + ) -> Result>, Error> { + let storage = self + .storage_factory + .access_storage(&self.stop_receiver, l1_batch_env.number - 1) + .await + .context("failed creating VM storage")? + .ok_or(Error::Canceled)?; + Ok(self + .batch_executor + .init_batch(storage, l1_batch_env, system_env)) + } + /// This function is meant to be called only once during the state-keeper initialization. /// It will check if we should load a protocol upgrade or a `setChainId` transaction, /// perform some checks and return it. @@ -418,7 +389,7 @@ impl ZkSyncStateKeeper { async fn start_next_l2_block( params: L2BlockParams, updates_manager: &mut UpdatesManager, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, ) -> anyhow::Result<()> { updates_manager.push_l2_block(params); let block_env = updates_manager.l2_block.get_env(); @@ -460,7 +431,7 @@ impl ZkSyncStateKeeper { )] async fn restore_state( &mut self, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, updates_manager: &mut UpdatesManager, l2_blocks_to_reexecute: Vec, ) -> Result<(), Error> { @@ -491,6 +462,10 @@ impl ZkSyncStateKeeper { .execute_tx(tx.clone()) .await .with_context(|| format!("failed re-executing transaction {:?}", tx.hash()))?; + let result = TxExecutionResult::new(result, &tx); + + APP_METRICS.processed_txs[&TxStage::StateKeeper].inc(); + APP_METRICS.processed_l1_txs[&TxStage::StateKeeper].inc_by(tx.is_l1().into()); let TxExecutionResult::Success { tx_result, @@ -564,7 +539,7 @@ impl ZkSyncStateKeeper { )] async fn process_l1_batch( &mut self, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, updates_manager: &mut UpdatesManager, protocol_upgrade_tx: Option, ) -> Result<(), Error> { @@ -692,7 +667,7 @@ impl ZkSyncStateKeeper { async fn process_upgrade_tx( &mut self, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, updates_manager: &mut UpdatesManager, protocol_upgrade_tx: ProtocolUpgradeTx, ) -> anyhow::Result<()> { @@ -759,7 +734,7 @@ impl ZkSyncStateKeeper { #[tracing::instrument(skip_all)] async fn process_one_tx( &mut self, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, updates_manager: &mut UpdatesManager, tx: Transaction, ) -> 
anyhow::Result<(SealResolution, TxExecutionResult)> { @@ -768,8 +743,12 @@ impl ZkSyncStateKeeper { .execute_tx(tx.clone()) .await .with_context(|| format!("failed executing transaction {:?}", tx.hash()))?; + let exec_result = TxExecutionResult::new(exec_result, &tx); latency.observe(); + APP_METRICS.processed_txs[&TxStage::StateKeeper].inc(); + APP_METRICS.processed_l1_txs[&TxStage::StateKeeper].inc_by(tx.is_l1().into()); + let latency = KEEPER_METRICS.determine_seal_resolution.start(); // All of `TxExecutionResult::BootloaderOutOfGasForTx`, // `Halt::NotEnoughGasProvided` correspond to out-of-gas errors but of different nature. diff --git a/core/node/state_keeper/src/lib.rs b/core/node/state_keeper/src/lib.rs index 1c12f7825486..c12e4163fdd4 100644 --- a/core/node/state_keeper/src/lib.rs +++ b/core/node/state_keeper/src/lib.rs @@ -1,18 +1,4 @@ -use std::sync::Arc; - -use tokio::sync::watch; -use zksync_config::configs::{ - chain::{MempoolConfig, StateKeeperConfig}, - wallets, -}; -use zksync_dal::{ConnectionPool, Core}; -use zksync_node_fee_model::BatchFeeModelInputProvider; -use zksync_types::L2ChainId; - pub use self::{ - batch_executor::{ - main_executor::MainBatchExecutor, BatchExecutor, BatchExecutorHandle, TxExecutionResult, - }, io::{ mempool::MempoolIO, L2BlockParams, L2BlockSealerTask, OutputHandler, StateKeeperIO, StateKeeperOutputHandler, StateKeeperPersistence, TreeWritesPersistence, @@ -25,7 +11,7 @@ pub use self::{ updates::UpdatesManager, }; -mod batch_executor; +pub mod executor; pub mod io; mod keeper; mod mempool_actor; @@ -38,41 +24,3 @@ pub(crate) mod tests; pub(crate) mod types; pub mod updates; pub(crate) mod utils; - -#[allow(clippy::too_many_arguments)] -pub async fn create_state_keeper( - state_keeper_config: StateKeeperConfig, - wallets: wallets::StateKeeper, - async_cache: AsyncRocksdbCache, - l2chain_id: L2ChainId, - mempool_config: &MempoolConfig, - pool: ConnectionPool, - mempool: MempoolGuard, - batch_fee_input_provider: Arc, - output_handler: OutputHandler, - stop_receiver: watch::Receiver, -) -> ZkSyncStateKeeper { - let batch_executor_base = MainBatchExecutor::new(state_keeper_config.save_call_traces, false); - - let io = MempoolIO::new( - mempool, - batch_fee_input_provider, - pool, - &state_keeper_config, - wallets.fee_account.address(), - mempool_config.delay_interval(), - l2chain_id, - ) - .expect("Failed initializing main node I/O for state keeper"); - - let sealer = SequencerSealer::new(state_keeper_config); - - ZkSyncStateKeeper::new( - stop_receiver, - Box::new(io), - Box::new(batch_executor_base), - output_handler, - Arc::new(sealer), - Arc::new(async_cache), - ) -} diff --git a/core/node/state_keeper/src/metrics.rs b/core/node/state_keeper/src/metrics.rs index 1bf314d1b91e..7da5babd2199 100644 --- a/core/node/state_keeper/src/metrics.rs +++ b/core/node/state_keeper/src/metrics.rs @@ -10,10 +10,7 @@ use vise::{ Metrics, }; use zksync_mempool::MempoolStore; -use zksync_multivm::interface::{ - DeduplicatedWritesMetrics, VmExecutionResultAndLogs, VmRevertReason, -}; -use zksync_shared_metrics::InteractionType; +use zksync_multivm::interface::{DeduplicatedWritesMetrics, VmRevertReason}; use zksync_types::ProtocolVersionId; use super::seal_criteria::SealResolution; @@ -84,13 +81,6 @@ pub struct StateKeeperMetrics { /// The time it takes for transactions to be included in a block. Representative of the time user must wait before their transaction is confirmed. 
#[metrics(buckets = INCLUSION_DELAY_BUCKETS)] pub transaction_inclusion_delay: Family>, - /// Time spent by the state keeper on transaction execution. - #[metrics(buckets = Buckets::LATENCIES)] - pub tx_execution_time: Family>, - /// Number of times gas price was reported as too high. - pub gas_price_too_high: Counter, - /// Number of times blob base fee was reported as too high. - pub blob_base_fee_too_high: Counter, /// The time it takes to match seal resolution for each tx. #[metrics(buckets = Buckets::LATENCIES)] pub match_seal_resolution: Histogram, @@ -439,52 +429,9 @@ impl SealProgress<'_> { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "command", rename_all = "snake_case")] -pub(super) enum ExecutorCommand { - ExecuteTx, - #[metrics(name = "start_next_miniblock")] - StartNextL2Block, - RollbackLastTx, - FinishBatch, - FinishBatchWithCache, -} - -const GAS_PER_NANOSECOND_BUCKETS: Buckets = Buckets::values(&[ - 0.01, 0.03, 0.1, 0.3, 0.5, 0.75, 1., 1.5, 3., 5., 10., 20., 50., -]); - -/// Executor-related state keeper metrics. -#[derive(Debug, Metrics)] -#[metrics(prefix = "state_keeper")] -pub(super) struct ExecutorMetrics { - /// Latency to process a single command sent to the batch executor. - #[metrics(buckets = Buckets::LATENCIES)] - pub batch_executor_command_response_time: Family>, - /// Cumulative latency of interacting with the storage when executing a transaction - /// in the batch executor. - #[metrics(buckets = Buckets::LATENCIES)] - pub batch_storage_interaction_duration: Family>, - #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] - pub computational_gas_per_nanosecond: Histogram, - #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] - pub failed_tx_gas_limit_per_nanosecond: Histogram, -} - -#[vise::register] -pub(super) static EXECUTOR_METRICS: vise::Global = vise::Global::new(); - #[derive(Debug, Metrics)] #[metrics(prefix = "batch_tip")] pub(crate) struct BatchTipMetrics { - #[metrics(buckets = Buckets::exponential(60000.0..=80000000.0, 2.0))] - gas_used: Histogram, - #[metrics(buckets = Buckets::exponential(1.0..=60000.0, 2.0))] - pubdata_published: Histogram, - #[metrics(buckets = Buckets::exponential(1.0..=4096.0, 2.0))] - circuit_statistic: Histogram, - #[metrics(buckets = Buckets::exponential(1.0..=4096.0, 2.0))] - execution_metrics_size: Histogram, #[metrics(buckets = Buckets::exponential(1.0..=60000.0, 2.0))] block_writes_metrics_positive_size: Histogram, #[metrics(buckets = Buckets::exponential(1.0..=60000.0, 2.0))] @@ -492,17 +439,6 @@ pub(crate) struct BatchTipMetrics { } impl BatchTipMetrics { - pub fn observe(&self, execution_result: &VmExecutionResultAndLogs) { - self.gas_used - .observe(execution_result.statistics.gas_used as usize); - self.pubdata_published - .observe(execution_result.statistics.pubdata_published as usize); - self.circuit_statistic - .observe(execution_result.statistics.circuit_statistic.total()); - self.execution_metrics_size - .observe(execution_result.get_execution_metrics(None).size()); - } - pub fn observe_writes_metrics( &self, initial_writes_metrics: &DeduplicatedWritesMetrics, diff --git a/core/node/state_keeper/src/state_keeper_storage.rs b/core/node/state_keeper/src/state_keeper_storage.rs index 1b35f8ef73d0..f29115f9570e 100644 --- a/core/node/state_keeper/src/state_keeper_storage.rs +++ b/core/node/state_keeper/src/state_keeper_storage.rs @@ -70,7 +70,9 @@ impl ReadStorageFactory for AsyncRocksdbCache { Ok(storage) } else { Ok(Some( - 
OwnedStorage::postgres(connection, l1_batch_number).await?, + OwnedStorage::postgres(connection, l1_batch_number) + .await? + .into(), )) } } diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index d17261a3a0f7..23aec8af49fb 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -1,14 +1,17 @@ //! Test utilities that can be used for testing sequencer that may //! be useful outside of this crate. +use async_trait::async_trait; use once_cell::sync::Lazy; -use tokio::sync::mpsc; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, Core, CoreDal as _}; use zksync_multivm::interface::{ - storage::StorageViewCache, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, - Refunds, SystemEnv, VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, + executor::{BatchExecutor, BatchExecutorFactory}, + storage::{InMemoryStorage, StorageView}, + BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionResultAndLogs, }; +use zksync_state::OwnedStorage; use zksync_test_account::Account; use zksync_types::{ fee::Fee, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, @@ -17,94 +20,62 @@ use zksync_types::{ }; use zksync_utils::u256_to_h256; -use crate::{ - batch_executor::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}, - types::ExecutionMetricsForCriteria, -}; - pub mod test_batch_executor; pub(super) static BASE_SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); -pub(super) fn default_vm_batch_result() -> FinishedL1Batch { - FinishedL1Batch { - block_tip_execution_result: VmExecutionResultAndLogs { - result: ExecutionResult::Success { output: vec![] }, - logs: VmExecutionLogs::default(), - statistics: VmExecutionStatistics::default(), - refunds: Refunds::default(), - }, - final_execution_state: CurrentExecutionState { - events: vec![], - deduplicated_storage_logs: vec![], - used_contract_hashes: vec![], - user_l2_to_l1_logs: vec![], - system_logs: vec![], - storage_refunds: Vec::new(), - pubdata_costs: Vec::new(), - }, - final_bootloader_memory: Some(vec![]), - pubdata_input: Some(vec![]), - state_diffs: Some(vec![]), - } -} - /// Creates a `TxExecutionResult` object denoting a successful tx execution. -pub(crate) fn successful_exec() -> TxExecutionResult { - TxExecutionResult::Success { +pub(crate) fn successful_exec() -> BatchTransactionExecutionResult { + BatchTransactionExecutionResult { tx_result: Box::new(VmExecutionResultAndLogs { result: ExecutionResult::Success { output: vec![] }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), }), - tx_metrics: Box::new(ExecutionMetricsForCriteria { - l1_gas: Default::default(), - execution_metrics: Default::default(), - }), compressed_bytecodes: vec![], - call_tracer_result: vec![], - gas_remaining: Default::default(), + call_traces: vec![], } } -pub(crate) fn storage_view_cache() -> StorageViewCache { - StorageViewCache::default() -} - /// `BatchExecutor` which doesn't check anything at all. Accepts all transactions. 
#[derive(Debug)] pub struct MockBatchExecutor; -impl BatchExecutor<()> for MockBatchExecutor { +impl BatchExecutorFactory for MockBatchExecutor { fn init_batch( &mut self, - _storage: (), - _l1batch_params: L1BatchEnv, + _storage: OwnedStorage, + _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, - ) -> BatchExecutorHandle { - let (send, recv) = mpsc::channel(1); - let handle = tokio::task::spawn(async { - let mut recv = recv; - while let Some(cmd) = recv.recv().await { - match cmd { - Command::ExecuteTx(_, resp) => resp.send(successful_exec()).unwrap(), - Command::StartNextL2Block(_, resp) => resp.send(()).unwrap(), - Command::RollbackLastTx(_) => panic!("unexpected rollback"), - Command::FinishBatch(resp) => { - // Blanket result, it doesn't really matter. - resp.send(default_vm_batch_result()).unwrap(); - break; - } - Command::FinishBatchWithCache(resp) => resp - .send((default_vm_batch_result(), storage_view_cache())) - .unwrap(), - } - } - anyhow::Ok(()) - }); - BatchExecutorHandle::from_raw(handle, send) + ) -> Box> { + Box::new(Self) + } +} + +#[async_trait] +impl BatchExecutor for MockBatchExecutor { + async fn execute_tx( + &mut self, + _tx: Transaction, + ) -> anyhow::Result { + Ok(successful_exec()) + } + + async fn rollback_last_tx(&mut self) -> anyhow::Result<()> { + panic!("unexpected rollback"); + } + + async fn start_next_l2_block(&mut self, _env: L2BlockEnv) -> anyhow::Result<()> { + Ok(()) + } + + async fn finish_batch( + self: Box, + ) -> anyhow::Result<(FinishedL1Batch, StorageView)> { + let storage = OwnedStorage::boxed(InMemoryStorage::default()); + Ok((FinishedL1Batch::mock(), StorageView::new(storage))) } } @@ -146,7 +117,7 @@ pub async fn fund(pool: &ConnectionPool, addresses: &[Address]) { pub(crate) const DEFAULT_GAS_PER_PUBDATA: u32 = 10000; -pub(crate) fn fee(gas_limit: u32) -> Fee { +pub fn fee(gas_limit: u32) -> Fee { Fee { gas_limit: U256::from(gas_limit), max_fee_per_gas: SYSTEM_CONTEXT_MINIMAL_BASE_FEE.into(), diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index d8ee36990a1c..ffca8dff8643 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -13,27 +13,28 @@ use std::{ }; use async_trait::async_trait; -use tokio::sync::{mpsc, watch}; +use tokio::sync::watch; use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ - interface::{ExecutionResult, L1BatchEnv, SystemEnv, VmExecutionResultAndLogs}, + interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, + storage::InMemoryStorage, + BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, + L2BlockEnv, SystemEnv, VmExecutionLogs, VmExecutionResultAndLogs, + }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_node_test_utils::create_l2_transaction; -use zksync_state::ReadStorageFactory; +use zksync_state::{interface::StorageView, OwnedStorage, ReadStorageFactory}; use zksync_types::{ - fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, protocol_upgrade::ProtocolUpgradeTx, + Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, }; use crate::{ - batch_executor::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}, io::{IoCursor, L1BatchParams, L2BlockParams, PendingBatchData, 
StateKeeperIO}, seal_criteria::{IoSealCriteria, SequencerSealer, UnexecutableReason}, - testonly::{ - default_vm_batch_result, storage_view_cache, successful_exec, BASE_SYSTEM_CONTRACTS, - }, - types::ExecutionMetricsForCriteria, + testonly::{successful_exec, BASE_SYSTEM_CONTRACTS}, updates::UpdatesManager, OutputHandler, StateKeeperOutputHandler, ZkSyncStateKeeper, }; @@ -110,7 +111,7 @@ impl TestScenario { mut self, description: &'static str, tx: Transaction, - result: TxExecutionResult, + result: BatchTransactionExecutionResult, ) -> Self { self.actions .push_back(ScenarioItem::Tx(description, tx, result)); @@ -198,13 +199,13 @@ impl TestScenario { pub(crate) async fn run(self, sealer: SequencerSealer) { assert!(!self.actions.is_empty(), "Test scenario can't be empty"); - let batch_executor_base = TestBatchExecutorBuilder::new(&self); + let batch_executor = TestBatchExecutorBuilder::new(&self); let (stop_sender, stop_receiver) = watch::channel(false); let (io, output_handler) = TestIO::new(stop_sender, self); let state_keeper = ZkSyncStateKeeper::new( stop_receiver, Box::new(io), - Box::new(batch_executor_base), + Box::new(batch_executor), output_handler, Arc::new(sealer), Arc::new(MockReadStorageFactory), @@ -253,27 +254,33 @@ pub(crate) fn random_upgrade_tx(tx_number: u64) -> ProtocolUpgradeTx { } /// Creates a `BatchTransactionExecutionResult` denoting a successful tx execution with a single user L2-to-L1 log. -pub(crate) fn successful_exec_with_metrics( - tx_metrics: ExecutionMetricsForCriteria, -) -> TxExecutionResult { - TxExecutionResult::Success { +pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { + BatchTransactionExecutionResult { tx_result: Box::new(VmExecutionResultAndLogs { result: ExecutionResult::Success { output: vec![] }, - logs: Default::default(), + logs: VmExecutionLogs { + user_l2_to_l1_logs: vec![UserL2ToL1Log::default()], + ..VmExecutionLogs::default() + }, statistics: Default::default(), refunds: Default::default(), }), - tx_metrics: Box::new(tx_metrics), compressed_bytecodes: vec![], - call_tracer_result: vec![], - gas_remaining: Default::default(), + call_traces: vec![], } } /// Creates a `BatchTransactionExecutionResult` denoting a tx halted with the given reason. -pub(crate) fn rejected_exec() -> TxExecutionResult { - TxExecutionResult::RejectedByVm { - reason: zksync_multivm::interface::Halt::InnerTxError, +pub(crate) fn rejected_exec(reason: Halt) -> BatchTransactionExecutionResult { + BatchTransactionExecutionResult { + tx_result: Box::new(VmExecutionResultAndLogs { + result: ExecutionResult::Halt { reason }, + logs: Default::default(), + statistics: Default::default(), + refunds: Default::default(), + }), + compressed_bytecodes: vec![], + call_traces: vec![], } } @@ -283,7 +290,7 @@ enum ScenarioItem { NoTxsUntilNextAction(&'static str), /// Increments protocol version in IO state.
IncrementProtocolVersion(&'static str), - Tx(&'static str, Transaction, TxExecutionResult), + Tx(&'static str, Transaction, BatchTransactionExecutionResult), Rollback(&'static str, Transaction), Reject(&'static str, Transaction, UnexecutableReason), L2BlockSeal( @@ -332,7 +339,7 @@ impl fmt::Debug for ScenarioItem { } } -type ExpectedTransactions = VecDeque>>; +type ExpectedTransactions = VecDeque>>; #[derive(Debug, Default)] pub struct TestBatchExecutorBuilder { @@ -348,7 +355,7 @@ pub struct TestBatchExecutorBuilder { impl TestBatchExecutorBuilder { pub(crate) fn new(scenario: &TestScenario) -> Self { let mut txs = VecDeque::new(); - let mut batch_txs = HashMap::new(); + let mut batch_txs = HashMap::<_, VecDeque>::new(); let mut rollback_set = HashSet::new(); // Insert data about the pending batch, if it exists. @@ -369,9 +376,7 @@ impl TestBatchExecutorBuilder { ScenarioItem::Tx(_, tx, result) => { batch_txs .entry(tx.hash()) - .and_modify(|txs: &mut VecDeque| { - txs.push_back(result.clone()) - }) + .and_modify(|txs| txs.push_back(result.clone())) .or_insert_with(|| { let mut txs = VecDeque::with_capacity(1); txs.push_back(result.clone()); @@ -410,34 +415,24 @@ impl TestBatchExecutorBuilder { } } -impl BatchExecutor<()> for TestBatchExecutorBuilder { +impl BatchExecutorFactory for TestBatchExecutorBuilder { fn init_batch( &mut self, - _storage: (), - _l1_batch_params: L1BatchEnv, + _storage: OwnedStorage, + _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, - ) -> BatchExecutorHandle { - let (commands_sender, commands_receiver) = mpsc::channel(1); - - let executor = TestBatchExecutor::new( - commands_receiver, - self.txs.pop_front().unwrap(), - self.rollback_set.clone(), - ); - let handle = tokio::task::spawn_blocking(move || { - executor.run(); - Ok(()) - }); - BatchExecutorHandle::from_raw(handle, commands_sender) + ) -> Box> { + let executor = + TestBatchExecutor::new(self.txs.pop_front().unwrap(), self.rollback_set.clone()); + Box::new(executor) } } #[derive(Debug)] pub(super) struct TestBatchExecutor { - commands: mpsc::Receiver, /// Mapping tx -> response. /// The same transaction can be executed several times, so we use a sequence of responses and consume them by one. - txs: HashMap>, + txs: HashMap>, /// Set of transactions that are expected to be rolled back. rollback_set: HashSet, /// Last executed tx hash. @@ -446,64 +441,63 @@ pub(super) struct TestBatchExecutor { impl TestBatchExecutor { pub(super) fn new( - commands: mpsc::Receiver, - txs: HashMap>, + txs: HashMap>, rollback_set: HashSet, ) -> Self { Self { - commands, txs, rollback_set, last_tx: H256::default(), // We don't expect rollbacks until the first tx is executed. } } +} - pub(super) fn run(mut self) { - while let Some(cmd) = self.commands.blocking_recv() { - match cmd { - Command::ExecuteTx(tx, resp) => { - let result = self - .txs - .get_mut(&tx.hash()) - .unwrap() - .pop_front() - .unwrap_or_else(|| { - panic!( - "Received a request to execute an unknown transaction: {:?}", - tx - ) - }); - resp.send(result).unwrap(); - self.last_tx = tx.hash(); - } - Command::StartNextL2Block(_, resp) => { - resp.send(()).unwrap(); - } - Command::RollbackLastTx(resp) => { - // This is an additional safety check: IO would check that every rollback is included in the - // test scenario, but here we want to additionally check that each such request goes to the - // the batch executor as well. - if !self.rollback_set.contains(&self.last_tx) { - // Request to rollback an unexpected tx. 
- panic!( - "Received a request to rollback an unexpected tx. Last executed tx: {:?}", - self.last_tx - ) - } - resp.send(()).unwrap(); - // It's OK to not update `last_executed_tx`, since state keeper never should rollback more than 1 - // tx in a row, and it's going to cause a panic anyway. - } - Command::FinishBatch(resp) => { - // Blanket result, it doesn't really matter. - resp.send(default_vm_batch_result()).unwrap(); - return; - } - Command::FinishBatchWithCache(resp) => resp - .send((default_vm_batch_result(), storage_view_cache())) - .unwrap(), - } } +#[async_trait] +impl BatchExecutor for TestBatchExecutor { + async fn execute_tx( + &mut self, + tx: Transaction, + ) -> anyhow::Result { + let result = self + .txs + .get_mut(&tx.hash()) + .unwrap() + .pop_front() + .unwrap_or_else(|| { + panic!( + "Received a request to execute an unknown transaction: {:?}", + tx + ) + }); + self.last_tx = tx.hash(); + Ok(result) + } + + async fn rollback_last_tx(&mut self) -> anyhow::Result<()> { + // This is an additional safety check: IO would check that every rollback is included in the + // test scenario, but here we want to additionally check that each such request goes to + // the batch executor as well. + if !self.rollback_set.contains(&self.last_tx) { + // Request to rollback an unexpected tx. + panic!( + "Received a request to rollback an unexpected tx. Last executed tx: {:?}", + self.last_tx + ) } + // It's OK not to update `last_executed_tx`, since the state keeper should never roll back more than 1 + // tx in a row, and it's going to cause a panic anyway. + Ok(()) + } + + async fn start_next_l2_block(&mut self, _env: L2BlockEnv) -> anyhow::Result<()> { + Ok(()) + } + + async fn finish_batch( + self: Box, + ) -> anyhow::Result<(FinishedL1Batch, StorageView)> { + let storage = OwnedStorage::boxed(InMemoryStorage::default()); + Ok((FinishedL1Batch::mock(), StorageView::new(storage))) } } @@ -809,12 +803,13 @@ impl StateKeeperIO for TestIO { pub struct MockReadStorageFactory; #[async_trait] -impl ReadStorageFactory<()> for MockReadStorageFactory { +impl ReadStorageFactory for MockReadStorageFactory { async fn access_storage( &self, _stop_receiver: &watch::Receiver, _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - Ok(Some(())) + ) -> anyhow::Result> { + let storage = InMemoryStorage::default(); + Ok(Some(OwnedStorage::boxed(storage))) } } diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index e9a0a57c6977..80de0f0beff9 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -11,7 +11,7 @@ use zksync_config::configs::chain::StateKeeperConfig; use zksync_multivm::{ interface::{ ExecutionResult, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxExecutionMode, - VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, + VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -27,7 +27,6 @@ use zksync_types::{ use zksync_utils::u256_to_h256; use crate::{ - batch_executor::TxExecutionResult, io::PendingBatchData, keeper::POLL_WAIT_DURATION, seal_criteria::{ @@ -37,14 +36,13 @@ use crate::{ testonly::{ successful_exec, test_batch_executor::{ - random_tx, random_upgrade_tx, rejected_exec, successful_exec_with_metrics, + random_tx, random_upgrade_tx, rejected_exec, successful_exec_with_log, MockReadStorageFactory, TestBatchExecutorBuilder, TestIO, TestScenario, FEE_ACCOUNT, }, BASE_SYSTEM_CONTRACTS, 
}, - types::ExecutionMetricsForCriteria, updates::UpdatesManager, - utils::l1_batch_base_cost, + utils::{gas_count_from_tx_and_metrics, l1_batch_base_cost}, ZkSyncStateKeeper, }; @@ -194,29 +192,28 @@ async fn sealed_by_number_of_txs() { #[tokio::test] async fn sealed_by_gas() { + let first_tx = random_tx(1); + let execution_result = successful_exec_with_log(); + let exec_metrics = execution_result + .tx_result + .get_execution_metrics(Some(&first_tx)); + assert!(exec_metrics.size() > 0); + let l1_gas_per_tx = gas_count_from_tx_and_metrics(&first_tx, &exec_metrics); + assert!(l1_gas_per_tx.commit > 0); + let config = StateKeeperConfig { - max_single_tx_gas: 62_002, + max_single_tx_gas: 62_000 + l1_gas_per_tx.commit * 2, reject_tx_at_gas_percentage: 1.0, close_block_at_gas_percentage: 0.5, ..StateKeeperConfig::default() }; let sealer = SequencerSealer::with_sealers(config, vec![Box::new(GasCriterion)]); - let l1_gas_per_tx = BlockGasCount { - commit: 1, // Both txs together with `block_base_cost` would bring it over the block `31_001` commit bound. - prove: 0, - execute: 0, - }; - let execution_result = successful_exec_with_metrics(ExecutionMetricsForCriteria { - l1_gas: l1_gas_per_tx, - execution_metrics: VmExecutionMetrics::default(), - }); - TestScenario::new() .seal_l2_block_when(|updates| { updates.l2_block.executed_transactions.len() == 1 }) - .next_tx("First tx", random_tx(1), execution_result.clone()) + .next_tx("First tx", first_tx, execution_result.clone()) .l2_block_sealed_with("L2 block with a single tx", move |updates| { assert_eq!( updates.l2_block.l1_gas_count, @@ -226,11 +223,11 @@ async fn sealed_by_gas() { }) .next_tx("Second tx", random_tx(1), execution_result) .l2_block_sealed("L2 block 2") - .batch_sealed_with("Batch sealed with both txs", |updates| { + .batch_sealed_with("Batch sealed with both txs", move |updates| { assert_eq!( updates.l1_batch.l1_gas_count, BlockGasCount { - commit: l1_batch_base_cost(AggregatedActionType::Commit) + 2, + commit: l1_batch_base_cost(AggregatedActionType::Commit) + l1_gas_per_tx.commit * 2, prove: l1_batch_base_cost(AggregatedActionType::PublishProofOnchain), execute: l1_batch_base_cost(AggregatedActionType::Execute), }, @@ -254,14 +251,7 @@ async fn sealed_by_gas_then_by_num_tx() { vec![Box::new(GasCriterion), Box::new(SlotsCriterion)], ); - let execution_result = successful_exec_with_metrics(ExecutionMetricsForCriteria { - l1_gas: BlockGasCount { - commit: 1, - prove: 0, - execute: 0, - }, - execution_metrics: VmExecutionMetrics::default(), - }); + let execution_result = successful_exec_with_log(); // 1st tx is sealed by gas sealer; 2nd, 3rd, & 4th are sealed by slots sealer. 
TestScenario::new() @@ -316,7 +306,11 @@ async fn rejected_tx() { let rejected_tx = random_tx(1); TestScenario::new() .seal_l2_block_when(|updates| updates.l2_block.executed_transactions.len() == 1) - .next_tx("Rejected tx", rejected_tx.clone(), rejected_exec()) + .next_tx( + "Rejected tx", + rejected_tx.clone(), + rejected_exec(Halt::InnerTxError), + ) .tx_rejected( "Tx got rejected", rejected_tx, @@ -349,7 +343,7 @@ async fn bootloader_tip_out_of_gas_flow() { .next_tx( "Tx -> Bootloader tip out of gas", bootloader_out_of_gas_tx.clone(), - TxExecutionResult::BootloaderOutOfGasForTx, + rejected_exec(Halt::BootloaderOutOfGas), ) .tx_rollback( "Last tx rolled back to seal the block", @@ -424,7 +418,7 @@ async fn pending_batch_is_applied() { async fn load_upgrade_tx() { let sealer = SequencerSealer::default(); let scenario = TestScenario::new(); - let batch_executor_base = TestBatchExecutorBuilder::new(&scenario); + let batch_executor = TestBatchExecutorBuilder::new(&scenario); let (stop_sender, stop_receiver) = watch::channel(false); let (mut io, output_handler) = TestIO::new(stop_sender, scenario); @@ -434,7 +428,7 @@ async fn load_upgrade_tx() { let mut sk = ZkSyncStateKeeper::new( stop_receiver, Box::new(io), - Box::new(batch_executor_base), + Box::new(batch_executor), output_handler, Arc::new(sealer), Arc::new(MockReadStorageFactory), diff --git a/core/node/tee_verifier_input_producer/Cargo.toml b/core/node/tee_verifier_input_producer/Cargo.toml index c975bbcd280a..7a5a4de5d0c9 100644 --- a/core/node/tee_verifier_input_producer/Cargo.toml +++ b/core/node/tee_verifier_input_producer/Cargo.toml @@ -18,7 +18,7 @@ zksync_queued_job_processor.workspace = true zksync_tee_verifier.workspace = true zksync_types.workspace = true zksync_utils.workspace = true -zksync_vm_utils.workspace = true +zksync_vm_executor.workspace = true vise.workspace = true anyhow.workspace = true diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs index abd70542a42f..08382903ad6d 100644 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ b/core/node/tee_verifier_input_producer/src/lib.rs @@ -21,7 +21,7 @@ use zksync_queued_job_processor::JobProcessor; use zksync_tee_verifier::Verify; use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; use zksync_utils::u256_to_h256; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; use self::metrics::METRICS; diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index cc6313fa5727..ceb11a982477 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -11,17 +11,16 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_multivm.workspace = true +zksync_vm_interface.workspace = true zksync_types.workspace = true zksync_dal.workspace = true zksync_contracts.workspace = true zksync_state.workspace = true zksync_storage.workspace = true -zksync_state_keeper.workspace = true zksync_utils.workspace = true zksync_prover_interface.workspace = true zksync_object_store.workspace = true -zksync_vm_utils.workspace = true +zksync_vm_executor.workspace = true zksync_health_check.workspace = true serde.workspace = true @@ -37,6 +36,7 @@ vise.workspace = true zksync_node_test_utils.workspace = true zksync_node_genesis.workspace = true zksync_test_account.workspace = true +assert_matches.workspace = true backon.workspace = true futures = { workspace = true, features = ["compat"] } 
rand.workspace = true diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index f7f8c099609f..f23f63533ff5 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -6,16 +6,18 @@ use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_object_store::ObjectStore; use zksync_prover_interface::inputs::VMRunWitnessInputData; -use zksync_state_keeper::{BatchExecutor, StateKeeperOutputHandler, UpdatesManager}; +use zksync_state::OwnedStorage; use zksync_types::{ block::StorageOracleInfo, witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId, H256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{executor::BatchExecutorFactory, L1BatchEnv, L2BlockEnv, SystemEnv}; use crate::{ storage::StorageSyncTask, ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, - OutputHandlerFactory, VmRunner, VmRunnerIo, VmRunnerStorage, + L1BatchOutput, L2BlockOutput, OutputHandler, OutputHandlerFactory, VmRunner, VmRunnerIo, + VmRunnerStorage, }; /// A standalone component that retrieves all needed data for basic witness generation and saves it to the bucket @@ -30,7 +32,7 @@ impl BasicWitnessInputProducer { pub async fn new( pool: ConnectionPool, object_store: Arc, - batch_executor: Box, + batch_executor_factory: Box>, rocksdb_path: String, chain_id: L2ChainId, first_processed_batch: L1BatchNumber, @@ -53,7 +55,7 @@ impl BasicWitnessInputProducer { Box::new(io), Arc::new(loader), Box::new(output_handler_factory), - batch_executor, + batch_executor_factory, ); Ok(( Self { vm_runner }, @@ -145,30 +147,38 @@ impl VmRunnerIo for BasicWitnessInputProducerIo { struct BasicWitnessInputProducerOutputHandler { pool: ConnectionPool, object_store: Arc, + system_env: SystemEnv, + l1_batch_number: L1BatchNumber, } #[async_trait] -impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { - async fn handle_l2_block(&mut self, _updates_manager: &UpdatesManager) -> anyhow::Result<()> { +impl OutputHandler for BasicWitnessInputProducerOutputHandler { + async fn handle_l2_block( + &mut self, + _env: L2BlockEnv, + _output: &L2BlockOutput, + ) -> anyhow::Result<()> { Ok(()) } #[tracing::instrument( name = "BasicWitnessInputProducerOutputHandler::handle_l1_batch", skip_all, - fields(l1_batch = %updates_manager.l1_batch.number) + fields(l1_batch = %self.l1_batch_number) )] - async fn handle_l1_batch( - &mut self, - updates_manager: Arc, - ) -> anyhow::Result<()> { - let l1_batch_number = updates_manager.l1_batch.number; + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { + let l1_batch_number = self.l1_batch_number; let mut connection = self.pool.connection().await?; tracing::info!(%l1_batch_number, "Started saving VM run data"); - let result = - get_updates_manager_witness_input_data(&mut connection, updates_manager).await?; + let result = get_updates_manager_witness_input_data( + &mut connection, + &self.system_env, + l1_batch_number, + &output, + ) + .await?; assert_database_witness_input_data(&mut connection, l1_batch_number, &result).await; @@ -193,18 +203,13 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { #[tracing::instrument(skip_all)] async fn get_updates_manager_witness_input_data( connection: &mut Connection<'_, Core>, - updates_manager: Arc, + system_env: &SystemEnv, + l1_batch_number: L1BatchNumber, + output: &L1BatchOutput, ) -> anyhow::Result { - let l1_batch_number 
= updates_manager.l1_batch.number; - let finished_batch = updates_manager - .l1_batch - .finished - .clone() - .ok_or_else(|| anyhow!("L1 batch {l1_batch_number:?} is not finished"))?; - - let initial_heap_content = finished_batch.final_bootloader_memory.unwrap(); // might be just empty - let default_aa = updates_manager.base_system_contract_hashes().default_aa; - let bootloader = updates_manager.base_system_contract_hashes().bootloader; + let initial_heap_content = output.batch.final_bootloader_memory.clone().unwrap(); // might be just empty + let default_aa = system_env.base_system_smart_contracts.hashes().default_aa; + let bootloader = system_env.base_system_smart_contracts.hashes().bootloader; let bootloader_code_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(bootloader) @@ -220,9 +225,8 @@ async fn get_updates_manager_witness_input_data( .ok_or_else(|| anyhow!("Default account bytecode should exist"))?; let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); - let hashes: HashSet = finished_batch - .final_execution_state - .used_contract_hashes + let used_contract_hashes = &output.batch.final_execution_state.used_contract_hashes; + let hashes: HashSet = used_contract_hashes .iter() // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` .filter(|&&hash| hash != h256_to_u256(bootloader)) @@ -232,33 +236,22 @@ async fn get_updates_manager_witness_input_data( .factory_deps_dal() .get_factory_deps(&hashes) .await; - if finished_batch - .final_execution_state - .used_contract_hashes - .contains(&account_code_hash) - { + if used_contract_hashes.contains(&account_code_hash) { used_bytecodes.insert(account_code_hash, account_bytecode); } - let storage_refunds = finished_batch.final_execution_state.storage_refunds; - let pubdata_costs = finished_batch.final_execution_state.pubdata_costs; - - let storage_view_cache = updates_manager - .storage_view_cache() - .expect("Storage view cache was not initialized"); - + let storage_refunds = output.batch.final_execution_state.storage_refunds.clone(); + let pubdata_costs = output.batch.final_execution_state.pubdata_costs.clone(); let witness_block_state = WitnessStorageState { - read_storage_key: storage_view_cache.read_storage_keys(), - is_write_initial: storage_view_cache.initial_writes(), + read_storage_key: output.storage_view_cache.read_storage_keys(), + is_write_initial: output.storage_view_cache.initial_writes(), }; Ok(VMRunWitnessInputData { l1_batch_number, used_bytecodes, initial_heap_content, - - protocol_version: updates_manager.protocol_version(), - + protocol_version: system_env.version, bootloader_code, default_account_code_hash: account_code_hash, storage_refunds, @@ -389,11 +382,14 @@ struct BasicWitnessInputProducerOutputHandlerFactory { impl OutputHandlerFactory for BasicWitnessInputProducerOutputHandlerFactory { async fn create_handler( &mut self, - _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { Ok(Box::new(BasicWitnessInputProducerOutputHandler { pool: self.pool.clone(), object_store: self.object_store.clone(), + system_env, + l1_batch_number: l1_batch_env.number, })) } } diff --git a/core/node/vm_runner/src/impls/mod.rs b/core/node/vm_runner/src/impls/mod.rs index 0911aec0561d..6b2f5dd0667f 100644 --- a/core/node/vm_runner/src/impls/mod.rs +++ b/core/node/vm_runner/src/impls/mod.rs @@ -10,7 +10,7 @@ pub use self::{ }, playground::{ VmPlayground, VmPlaygroundCursorOptions, VmPlaygroundIo, 
VmPlaygroundLoaderTask, - VmPlaygroundTasks, + VmPlaygroundStorageOptions, VmPlaygroundTasks, }, protective_reads::{ProtectiveReadsIo, ProtectiveReadsWriter, ProtectiveReadsWriterTasks}, }; diff --git a/core/node/vm_runner/src/impls/playground.rs b/core/node/vm_runner/src/impls/playground.rs index ad5623a1329d..091fa15fc953 100644 --- a/core/node/vm_runner/src/impls/playground.rs +++ b/core/node/vm_runner/src/impls/playground.rs @@ -15,12 +15,15 @@ use tokio::{ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_state::RocksdbStorage; -use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{vm::FastVmMode, L1BatchNumber, L2ChainId}; +use zksync_vm_executor::batch::MainBatchExecutorFactory; +use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv}; use crate::{ - ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, - StorageSyncTask, VmRunner, VmRunnerIo, VmRunnerStorage, + storage::{PostgresLoader, StorageLoader}, + ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, L1BatchOutput, + L2BlockOutput, OutputHandler, OutputHandlerFactory, StorageSyncTask, VmRunner, VmRunnerIo, + VmRunnerStorage, }; #[derive(Debug, Serialize)] @@ -35,6 +38,20 @@ impl From for Health { } } +/// Options configuring the storage loader for VM playground. +#[derive(Debug)] +#[non_exhaustive] +pub enum VmPlaygroundStorageOptions { + /// Use RocksDB cache. + Rocksdb(String), + /// Use prefetched batch snapshots (with fallback to Postgres if protective reads are not available for a batch). + Snapshots { + /// Whether to shadow snapshot storage with Postgres. This degrades performance and is mostly useful + /// to test snapshot correctness. + shadow: bool, + }, +} + /// Options related to the VM playground cursor. #[derive(Debug)] pub struct VmPlaygroundCursorOptions { @@ -46,16 +63,29 @@ pub struct VmPlaygroundCursorOptions { pub reset_state: bool, } +#[derive(Debug)] +enum VmPlaygroundStorage { + Rocksdb { + path: String, + task_sender: oneshot::Sender>, + }, + Snapshots { + shadow: bool, + }, +} + /// Virtual machine playground. Does not persist anything in Postgres; instead, keeps an L1 batch cursor as a plain text file in the RocksDB directory /// (so that the playground doesn't repeatedly process the same batches after a restart). +/// +/// If the RocksDB directory is not specified, the playground works in ephemeral mode: it takes all inputs from Postgres, doesn't maintain a cache +/// and doesn't persist the processed batch cursor. This is mostly useful for debugging purposes. 
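// ---- Editor's sketch (not part of the diff): the cache-less snapshots mode described above ----
// Constructor signature as introduced by this diff; `pool`, `chain_id` and a
// `VmPlaygroundCursorOptions` value `cursor` are assumed to be in scope.
let (playground, tasks) = VmPlayground::new(
    pool,
    FastVmMode::Shadow,
    VmPlaygroundStorageOptions::Snapshots { shadow: false },
    chain_id,
    cursor,
)
.await?;
assert!(tasks.loader_task.is_none()); // no RocksDB catch-up task in this mode
// ---- End of editor's sketch ----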
#[derive(Debug)] pub struct VmPlayground { pool: ConnectionPool, - batch_executor: MainBatchExecutor, - rocksdb_path: String, + batch_executor_factory: MainBatchExecutorFactory, + storage: VmPlaygroundStorage, chain_id: L2ChainId, io: VmPlaygroundIo, - loader_task_sender: oneshot::Sender>, output_handler_factory: ConcurrentOutputHandlerFactory, reset_to_batch: Option, @@ -66,14 +96,30 @@ impl VmPlayground { pub async fn new( pool: ConnectionPool, vm_mode: FastVmMode, - rocksdb_path: String, + storage: VmPlaygroundStorageOptions, chain_id: L2ChainId, cursor: VmPlaygroundCursorOptions, ) -> anyhow::Result<(Self, VmPlaygroundTasks)> { - tracing::info!("Starting VM playground with mode {vm_mode:?}, cursor options: {cursor:?}"); + tracing::info!("Starting VM playground with mode {vm_mode:?}, storage: {storage:?}, cursor options: {cursor:?}"); + + let cursor_file_path = match &storage { + VmPlaygroundStorageOptions::Rocksdb(path) => { + Some(Path::new(path).join("__vm_playground_cursor")) + } + VmPlaygroundStorageOptions::Snapshots { .. } => { + tracing::warn!( + "RocksDB cache is disabled; this can lead to significant performance degradation. Additionally, VM playground progress won't be persisted. \ + If this is not intended, set the cache path in app config" + ); + None + } + }; - let cursor_file_path = Path::new(&rocksdb_path).join("__vm_playground_cursor"); - let latest_processed_batch = VmPlaygroundIo::read_cursor(&cursor_file_path).await?; + let latest_processed_batch = if let Some(path) = &cursor_file_path { + VmPlaygroundIo::read_cursor(path).await? + } else { + None + }; tracing::info!("Latest processed batch: {latest_processed_batch:?}"); let latest_processed_batch = if cursor.reset_state { cursor.first_processed_batch @@ -81,8 +127,8 @@ impl VmPlayground { latest_processed_batch.unwrap_or(cursor.first_processed_batch) }; - let mut batch_executor = MainBatchExecutor::new(false, false); - batch_executor.set_fast_vm_mode(vm_mode); + let mut batch_executor_factory = MainBatchExecutorFactory::new(false, false); + batch_executor_factory.set_fast_vm_mode(vm_mode); let io = VmPlaygroundIo { cursor_file_path, @@ -97,24 +143,33 @@ impl VmPlayground { io.clone(), VmPlaygroundOutputHandler, ); - let (loader_task_sender, loader_task_receiver) = oneshot::channel(); + let (storage, loader_task) = match storage { + VmPlaygroundStorageOptions::Rocksdb(path) => { + let (task_sender, task_receiver) = oneshot::channel(); + let rocksdb = VmPlaygroundStorage::Rocksdb { path, task_sender }; + let loader_task = VmPlaygroundLoaderTask { + inner: task_receiver, + }; + (rocksdb, Some(loader_task)) + } + VmPlaygroundStorageOptions::Snapshots { shadow } => { + (VmPlaygroundStorage::Snapshots { shadow }, None) + } + }; let this = Self { pool, - batch_executor, - rocksdb_path, + batch_executor_factory, + storage, chain_id, io, - loader_task_sender, output_handler_factory, reset_to_batch: cursor.reset_state.then_some(cursor.first_processed_batch), }; Ok(( this, VmPlaygroundTasks { - loader_task: VmPlaygroundLoaderTask { - inner: loader_task_receiver, - }, + loader_task, output_handler_factory_task, }, )) @@ -132,7 +187,12 @@ impl VmPlayground { #[tracing::instrument(skip(self), err)] async fn reset_rocksdb_cache(&self, last_retained_batch: L1BatchNumber) -> anyhow::Result<()> { - let builder = RocksdbStorage::builder(self.rocksdb_path.as_ref()).await?; + let VmPlaygroundStorage::Rocksdb { path, .. 
} = &self.storage else { + tracing::warn!("No RocksDB path specified; skipping resetting cache"); + return Ok(()); + }; + + let builder = RocksdbStorage::builder(path.as_ref()).await?; let current_l1_batch = builder.l1_batch_number().await; if current_l1_batch <= Some(last_retained_batch) { tracing::info!("Resetting RocksDB cache is not required: its current batch #{current_l1_batch:?} is lower than the target"); @@ -150,10 +210,12 @@ impl VmPlayground { /// # Errors /// /// Propagates RocksDB and Postgres errors. - pub async fn run(self, stop_receiver: &watch::Receiver) -> anyhow::Result<()> { - fs::create_dir_all(&self.rocksdb_path) - .await - .with_context(|| format!("cannot create dir `{}`", self.rocksdb_path))?; + pub async fn run(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { + if let VmPlaygroundStorage::Rocksdb { path, .. } = &self.storage { + fs::create_dir_all(path) + .await + .with_context(|| format!("cannot create dir `{path}`"))?; + } if let Some(reset_to_batch) = self.reset_to_batch { self.io.health_updater.update(HealthStatus::Affected.into()); @@ -168,22 +230,28 @@ impl VmPlayground { self.io.update_health(); - let (loader, loader_task) = VmRunnerStorage::new( - self.pool.clone(), - self.rocksdb_path, - self.io.clone(), - self.chain_id, - ) - .await?; - self.loader_task_sender.send(loader_task).ok(); + let loader: Arc = match self.storage { + VmPlaygroundStorage::Rocksdb { path, task_sender } => { + let (loader, loader_task) = + VmRunnerStorage::new(self.pool.clone(), path, self.io.clone(), self.chain_id) + .await?; + task_sender.send(loader_task).ok(); + Arc::new(loader) + } + VmPlaygroundStorage::Snapshots { shadow } => { + let mut loader = PostgresLoader::new(self.pool.clone(), self.chain_id).await?; + loader.shadow_snapshots(shadow); + Arc::new(loader) + } + }; let vm_runner = VmRunner::new( self.pool, Box::new(self.io), - Arc::new(loader), + loader, Box::new(self.output_handler_factory), - Box::new(self.batch_executor), + Box::new(self.batch_executor_factory), ); - vm_runner.run(stop_receiver).await + vm_runner.run(&stop_receiver).await } } @@ -212,7 +280,7 @@ impl VmPlaygroundLoaderTask { #[derive(Debug)] pub struct VmPlaygroundTasks { /// Task that synchronizes storage with new available batches. - pub loader_task: VmPlaygroundLoaderTask, + pub loader_task: Option, /// Task that handles output from processed batches. pub output_handler_factory_task: ConcurrentOutputHandlerFactoryTask, } @@ -220,7 +288,7 @@ pub struct VmPlaygroundTasks { /// I/O powering [`VmPlayground`]. 
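// ---- Editor's sketch (not part of the diff): wiring the playground tasks ----
// `VmPlayground::run` now takes the stop receiver by value, and the loader task exists
// only in RocksDB mode. Assumes the tasks expose `run(stop_receiver)` entry points like
// the other VM runner tasks, with `stop_receiver: watch::Receiver<bool>` in scope.
if let Some(loader_task) = tasks.loader_task {
    tokio::spawn(loader_task.run(stop_receiver.clone()));
}
tokio::spawn(tasks.output_handler_factory_task.run(stop_receiver.clone()));
playground.run(stop_receiver).await?;
// ---- End of editor's sketch ----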
#[derive(Debug, Clone)] pub struct VmPlaygroundIo { - cursor_file_path: PathBuf, + cursor_file_path: Option, vm_mode: FastVmMode, window_size: u32, // We don't read this value from the cursor file in the `VmRunnerIo` implementation because reads / writes @@ -247,15 +315,16 @@ impl VmPlaygroundIo { } async fn write_cursor(&self, cursor: L1BatchNumber) -> anyhow::Result<()> { + let Some(cursor_file_path) = &self.cursor_file_path else { + return Ok(()); + }; let buffer = cursor.to_string(); - fs::write(&self.cursor_file_path, buffer) - .await - .with_context(|| { - format!( - "failed writing VM playground cursor to `{}`", - self.cursor_file_path.display() - ) - }) + fs::write(cursor_file_path, buffer).await.with_context(|| { + format!( + "failed writing VM playground cursor to `{}`", + cursor_file_path.display() + ) + }) } fn update_health(&self) { @@ -325,9 +394,17 @@ impl VmRunnerIo for VmPlaygroundIo { struct VmPlaygroundOutputHandler; #[async_trait] -impl StateKeeperOutputHandler for VmPlaygroundOutputHandler { - async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { - tracing::trace!("Processed L2 block #{}", updates_manager.l2_block.number); +impl OutputHandler for VmPlaygroundOutputHandler { + async fn handle_l2_block( + &mut self, + env: L2BlockEnv, + _output: &L2BlockOutput, + ) -> anyhow::Result<()> { + tracing::trace!("Processed L2 block #{}", env.number); + Ok(()) + } + + async fn handle_l1_batch(self: Box, _output: Arc) -> anyhow::Result<()> { Ok(()) } } @@ -336,8 +413,9 @@ impl StateKeeperOutputHandler for VmPlaygroundOutputHandler { impl OutputHandlerFactory for VmPlaygroundOutputHandler { async fn create_handler( &mut self, - _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + _system_env: SystemEnv, + _l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { Ok(Box::new(Self)) } } diff --git a/core/node/vm_runner/src/impls/protective_reads.rs b/core/node/vm_runner/src/impls/protective_reads.rs index dfd5251fd39b..b620675b78e2 100644 --- a/core/node/vm_runner/src/impls/protective_reads.rs +++ b/core/node/vm_runner/src/impls/protective_reads.rs @@ -1,15 +1,16 @@ use std::sync::Arc; -use anyhow::Context; use async_trait::async_trait; use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{L1BatchNumber, L2ChainId, StorageLog}; +use zksync_vm_executor::batch::MainBatchExecutorFactory; +use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv}; use crate::{ storage::StorageSyncTask, ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, - OutputHandlerFactory, VmRunner, VmRunnerIo, VmRunnerStorage, + L1BatchOutput, L2BlockOutput, OutputHandler, OutputHandlerFactory, VmRunner, VmRunnerIo, + VmRunnerStorage, }; /// A standalone component that writes protective reads asynchronously to state keeper. 
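// ---- Editor's sketch (not part of the diff): the essence of the protective reads handler below ----
// Protective reads are the read-only entries among the batch's deduplicated storage logs.
// The partition predicate is elided by a hunk boundary in the diff; `StorageLog::is_write()`
// is an assumption here. `output: Arc<L1BatchOutput>` is the `handle_l1_batch` argument.
let (_writes, protective_reads): (Vec<StorageLog>, Vec<StorageLog>) = output
    .batch
    .final_execution_state
    .deduplicated_storage_logs
    .iter()
    .partition(|log| log.is_write());
// ---- End of editor's sketch ----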
@@ -37,7 +38,7 @@ impl ProtectiveReadsWriter { let output_handler_factory = ProtectiveReadsOutputHandlerFactory { pool: pool.clone() }; let (output_handler_factory, output_handler_factory_task) = ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); - let batch_processor = MainBatchExecutor::new(false, false); + let batch_processor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( pool, Box::new(io), @@ -133,30 +134,29 @@ impl VmRunnerIo for ProtectiveReadsIo { #[derive(Debug)] struct ProtectiveReadsOutputHandler { + l1_batch_number: L1BatchNumber, pool: ConnectionPool, } #[async_trait] -impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { - async fn handle_l2_block(&mut self, _updates_manager: &UpdatesManager) -> anyhow::Result<()> { +impl OutputHandler for ProtectiveReadsOutputHandler { + async fn handle_l2_block( + &mut self, + _env: L2BlockEnv, + _output: &L2BlockOutput, + ) -> anyhow::Result<()> { Ok(()) } #[tracing::instrument( name = "ProtectiveReadsOutputHandler::handle_l1_batch", skip_all, - fields(l1_batch = %updates_manager.l1_batch.number) + fields(l1_batch = %self.l1_batch_number) )] - async fn handle_l1_batch( - &mut self, - updates_manager: Arc, - ) -> anyhow::Result<()> { - let finished_batch = updates_manager - .l1_batch - .finished - .as_ref() - .context("L1 batch is not actually finished")?; - let (_, computed_protective_reads): (Vec, Vec) = finished_batch + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { + let l1_batch_number = self.l1_batch_number; + let (_, computed_protective_reads): (Vec, Vec) = output + .batch .final_execution_state .deduplicated_storage_logs .iter() @@ -168,12 +168,12 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { .await?; let mut written_protective_reads = connection .storage_logs_dedup_dal() - .get_protective_reads_for_l1_batch(updates_manager.l1_batch.number) + .get_protective_reads_for_l1_batch(l1_batch_number) .await?; if !written_protective_reads.is_empty() { tracing::debug!( - l1_batch_number = %updates_manager.l1_batch.number, + l1_batch_number = %l1_batch_number, "Protective reads have already been written, validating" ); for protective_read in computed_protective_reads { @@ -181,7 +181,7 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { let key = protective_read.key.key(); if !written_protective_reads.remove(&protective_read.key) { tracing::error!( - l1_batch_number = %updates_manager.l1_batch.number, + l1_batch_number = %l1_batch_number, address = %address, key = %key, "VM runner produced a protective read that did not happen in state keeper" @@ -190,7 +190,7 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { } for remaining_read in written_protective_reads { tracing::error!( - l1_batch_number = %updates_manager.l1_batch.number, + l1_batch_number = %l1_batch_number, address = %remaining_read.address(), key = %remaining_read.key(), "State keeper produced a protective read that did not happen in VM runner" @@ -198,15 +198,12 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { } } else { tracing::debug!( - l1_batch_number = %updates_manager.l1_batch.number, + l1_batch_number = %l1_batch_number, "Protective reads have not been written, writing" ); connection .storage_logs_dedup_dal() - .insert_protective_reads( - updates_manager.l1_batch.number, - &computed_protective_reads, - ) + .insert_protective_reads(l1_batch_number, &computed_protective_reads) .await?; } @@ -223,10 
+220,12 @@ struct ProtectiveReadsOutputHandlerFactory { impl OutputHandlerFactory for ProtectiveReadsOutputHandlerFactory { async fn create_handler( &mut self, - _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + _system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { Ok(Box::new(ProtectiveReadsOutputHandler { pool: self.pool.clone(), + l1_batch_number: l1_batch_env.number, })) } } diff --git a/core/node/vm_runner/src/lib.rs b/core/node/vm_runner/src/lib.rs index 03e3f43baedc..63e2b5881aaf 100644 --- a/core/node/vm_runner/src/lib.rs +++ b/core/node/vm_runner/src/lib.rs @@ -16,7 +16,8 @@ mod tests; pub use self::{ io::VmRunnerIo, output_handler::{ - ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, + ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, L1BatchOutput, + L2BlockOutput, OutputHandler, OutputHandlerFactory, }, process::VmRunner, storage::{BatchExecuteData, StorageSyncTask, VmRunnerStorage}, diff --git a/core/node/vm_runner/src/output_handler.rs b/core/node/vm_runner/src/output_handler.rs index 4052c245a44f..25eae5e36845 100644 --- a/core/node/vm_runner/src/output_handler.rs +++ b/core/node/vm_runner/src/output_handler.rs @@ -1,9 +1,4 @@ -use std::{ - fmt::{Debug, Formatter}, - mem, - sync::Arc, - time::Duration, -}; +use std::{fmt, sync::Arc, time::Duration}; use anyhow::Context; use async_trait::async_trait; @@ -13,13 +8,52 @@ use tokio::{ task::JoinHandle, }; use zksync_dal::{ConnectionPool, Core}; -use zksync_state_keeper::{StateKeeperOutputHandler, UpdatesManager}; -use zksync_types::L1BatchNumber; +use zksync_state::interface::StorageViewCache; +use zksync_types::{L1BatchNumber, Transaction}; +use zksync_vm_interface::{ + BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, +}; use crate::{metrics::METRICS, VmRunnerIo}; type BatchReceiver = oneshot::Receiver>>; +/// Output from executing a single L2 block. +#[derive(Debug, Default)] +pub struct L2BlockOutput { + /// Executed transactions together with execution results. + pub transactions: Vec<(Transaction, BatchTransactionExecutionResult)>, +} + +impl L2BlockOutput { + pub(crate) fn push(&mut self, tx: Transaction, exec_result: BatchTransactionExecutionResult) { + self.transactions.push((tx, exec_result)); + } +} + +/// Output from executing the L1 batch tip. +#[derive(Debug)] +pub struct L1BatchOutput { + /// Finished L1 batch. + pub batch: FinishedL1Batch, + /// Information about storage accesses for the batch. + pub storage_view_cache: StorageViewCache, +} + +/// Handler of batch execution. +#[async_trait] +pub trait OutputHandler: fmt::Debug + Send { + /// Handles an L2 block processed by the VM. + async fn handle_l2_block( + &mut self, + env: L2BlockEnv, + output: &L2BlockOutput, + ) -> anyhow::Result<()>; + + /// Handles an L1 batch processed by the VM. + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()>; +} + /// Functionality to produce an [`OutputHandler`] implementation for a specific L1 batch. /// /// The idea behind this trait is that often handling output data is independent of the order of the @@ -27,7 +61,7 @@ type BatchReceiver = oneshot::Receiver>>; /// simultaneously. Implementing this trait signifies that this property is held for the data the /// implementation is responsible for. 
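// ---- Editor's sketch (not part of the diff): a minimal factory/handler pair ----
// Shows the shape of the new interface: per-batch state is captured from `L1BatchEnv`
// when the handler is created, and `handle_l1_batch` consumes the handler by value.
// Assumes this module's imports; `LoggingHandler`/`LoggingFactory` are hypothetical names.
#[derive(Debug)]
struct LoggingHandler {
    l1_batch_number: L1BatchNumber,
}

#[async_trait]
impl OutputHandler for LoggingHandler {
    async fn handle_l2_block(
        &mut self,
        env: L2BlockEnv,
        output: &L2BlockOutput,
    ) -> anyhow::Result<()> {
        tracing::debug!(
            "L2 block #{} executed {} transaction(s)",
            env.number,
            output.transactions.len()
        );
        Ok(())
    }

    async fn handle_l1_batch(self: Box<Self>, _output: Arc<L1BatchOutput>) -> anyhow::Result<()> {
        tracing::debug!("L1 batch #{} finished", self.l1_batch_number);
        Ok(())
    }
}

#[derive(Debug)]
struct LoggingFactory;

#[async_trait]
impl OutputHandlerFactory for LoggingFactory {
    async fn create_handler(
        &mut self,
        _system_env: SystemEnv,
        l1_batch_env: L1BatchEnv,
    ) -> anyhow::Result<Box<dyn OutputHandler>> {
        Ok(Box::new(LoggingHandler {
            l1_batch_number: l1_batch_env.number,
        }))
    }
}
// ---- End of editor's sketch ----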
#[async_trait] -pub trait OutputHandlerFactory: Debug + Send { +pub trait OutputHandlerFactory: fmt::Debug + Send { /// Creates an [`OutputHandler`] implementation for the provided L1 batch. Only /// supposed to be used for the L1 batch data it was created against. Using it for anything else /// will lead to errors. @@ -37,8 +71,9 @@ pub trait OutputHandlerFactory: Debug + Send { /// Propagates DB errors. async fn create_handler( &mut self, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result>; + system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result>; } /// A delegator factory that requires an underlying factory `F` that does the actual work, however @@ -57,8 +92,12 @@ pub struct ConcurrentOutputHandlerFactory Debug for ConcurrentOutputHandlerFactory { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { +impl fmt::Debug for ConcurrentOutputHandlerFactory +where + Io: VmRunnerIo, + F: OutputHandlerFactory, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ConcurrentOutputHandlerFactory") .field("pool", &self.pool) .field("io", &self.io) @@ -101,8 +140,10 @@ impl OutputHandlerFactory { async fn create_handler( &mut self, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { + let l1_batch_number = l1_batch_env.number; let mut conn = self.pool.connection_tagged(self.io.name()).await?; let latest_processed_batch = self.io.latest_processed_batch(&mut conn).await?; let last_processable_batch = self.io.last_ready_to_be_loaded_batch(&mut conn).await?; @@ -121,70 +162,50 @@ impl OutputHandlerFactory last_processable_batch ); - let handler = self.factory.create_handler(l1_batch_number).await?; + let handler = self + .factory + .create_handler(system_env, l1_batch_env) + .await?; let (sender, receiver) = oneshot::channel(); self.state.insert(l1_batch_number, receiver); - Ok(Box::new(AsyncOutputHandler::Running { handler, sender })) + Ok(Box::new(AsyncOutputHandler { handler, sender })) } } -enum AsyncOutputHandler { - Running { - handler: Box, - sender: oneshot::Sender>>, - }, - Finished, +struct AsyncOutputHandler { + handler: Box, + sender: oneshot::Sender>>, } -impl Debug for AsyncOutputHandler { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - AsyncOutputHandler::Running { handler, .. } => f - .debug_struct("AsyncOutputHandler::Running") - .field("handler", handler) - .finish(), - AsyncOutputHandler::Finished => f.debug_struct("AsyncOutputHandler::Finished").finish(), - } +impl fmt::Debug for AsyncOutputHandler { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AsyncOutputHandler") + .field("handler", &self.handler) + .finish() } } #[async_trait] -impl StateKeeperOutputHandler for AsyncOutputHandler { - async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { - match self { - AsyncOutputHandler::Running { handler, .. 
} => { - handler.handle_l2_block(updates_manager).await - } - AsyncOutputHandler::Finished => { - Err(anyhow::anyhow!("Cannot handle any more L2 blocks")) - } - } - } - - async fn handle_l1_batch( +impl OutputHandler for AsyncOutputHandler { + async fn handle_l2_block( &mut self, - updates_manager: Arc, + env: L2BlockEnv, + output: &L2BlockOutput, ) -> anyhow::Result<()> { - let state = mem::replace(self, AsyncOutputHandler::Finished); - match state { - AsyncOutputHandler::Running { - mut handler, - sender, - } => { - sender - .send(tokio::task::spawn(async move { - let latency = METRICS.output_handle_time.start(); - let result = handler.handle_l1_batch(updates_manager).await; - latency.observe(); - result - })) - .ok(); - Ok(()) - } - AsyncOutputHandler::Finished => { - Err(anyhow::anyhow!("Cannot handle any more L1 batches")) - } - } + self.handler.handle_l2_block(env, output).await + } + + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { + let handler = self.handler; + self.sender + .send(tokio::task::spawn(async move { + let latency = METRICS.output_handle_time.start(); + let result = handler.handle_l1_batch(output).await; + latency.observe(); + result + })) + .ok(); + Ok(()) } } @@ -196,8 +217,8 @@ pub struct ConcurrentOutputHandlerFactoryTask { state: Arc>, } -impl Debug for ConcurrentOutputHandlerFactoryTask { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { +impl fmt::Debug for ConcurrentOutputHandlerFactoryTask { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ConcurrentOutputHandlerFactoryTask") .field("pool", &self.pool) .field("io", &self.io) diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 3c5a00e074c0..e2a678ccdce4 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -3,14 +3,17 @@ use std::{sync::Arc, time::Duration}; use anyhow::Context; use tokio::{sync::watch, task::JoinHandle}; use zksync_dal::{ConnectionPool, Core}; -use zksync_multivm::interface::L2BlockEnv; -use zksync_state_keeper::{ - BatchExecutor, BatchExecutorHandle, ExecutionMetricsForCriteria, L2BlockParams, - StateKeeperOutputHandler, TxExecutionResult, UpdatesManager, -}; +use zksync_state::OwnedStorage; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber}; +use zksync_vm_interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, + L2BlockEnv, +}; -use crate::{metrics::METRICS, storage::StorageLoader, OutputHandlerFactory, VmRunnerIo}; +use crate::{ + metrics::METRICS, output_handler::OutputHandler, storage::StorageLoader, L1BatchOutput, + L2BlockOutput, OutputHandlerFactory, VmRunnerIo, +}; /// VM runner represents a logic layer of L1 batch / L2 block processing flow akin to that of state /// keeper. 
The difference is that VM runner is designed to be run on batches/blocks that have @@ -29,7 +32,7 @@ pub struct VmRunner { io: Box, loader: Arc, output_handler_factory: Box, - batch_processor: Box, + batch_executor_factory: Box>, } impl VmRunner { @@ -44,32 +47,27 @@ impl VmRunner { io: Box, loader: Arc, output_handler_factory: Box, - batch_processor: Box, + batch_executor_factory: Box>, ) -> Self { Self { pool, io, loader, output_handler_factory, - batch_processor, + batch_executor_factory, } } async fn process_batch( - mut batch_executor: BatchExecutorHandle, + mut batch_executor: Box>, l2_blocks: Vec, - mut updates_manager: UpdatesManager, - mut output_handler: Box, + mut output_handler: Box, ) -> anyhow::Result<()> { let latency = METRICS.run_vm_time.start(); for (i, l2_block) in l2_blocks.into_iter().enumerate() { + let block_env = L2BlockEnv::from_l2_block_data(&l2_block); if i > 0 { // First L2 block in every batch is already preloaded - updates_manager.push_l2_block(L2BlockParams { - timestamp: l2_block.timestamp, - virtual_blocks: l2_block.virtual_blocks, - }); - let block_env = L2BlockEnv::from_l2_block_data(&l2_block); batch_executor .start_next_l2_block(block_env) .await @@ -77,51 +75,36 @@ impl VmRunner { format!("failed starting L2 block with {block_env:?} in batch executor") })?; } + + let mut block_output = L2BlockOutput::default(); for tx in l2_block.txs { let exec_result = batch_executor .execute_tx(tx.clone()) .await .with_context(|| format!("failed executing transaction {:?}", tx.hash()))?; - let TxExecutionResult::Success { - tx_result, - tx_metrics, - call_tracer_result, - compressed_bytecodes, - .. - } = exec_result - else { - anyhow::bail!("Unexpected non-successful transaction"); - }; - let ExecutionMetricsForCriteria { - l1_gas: tx_l1_gas_this_tx, - execution_metrics: tx_execution_metrics, - } = *tx_metrics; - updates_manager.extend_from_executed_transaction( - tx, - *tx_result, - compressed_bytecodes, - tx_l1_gas_this_tx, - tx_execution_metrics, - call_tracer_result, + anyhow::ensure!( + !exec_result.was_halted(), + "Unexpected non-successful transaction" ); + block_output.push(tx, exec_result); } output_handler - .handle_l2_block(&updates_manager) + .handle_l2_block(block_env, &block_output) .await .context("VM runner failed to handle L2 block")?; } - let (finished_batch, storage_view_cache) = batch_executor - .finish_batch_with_cache() + let (batch, storage_view) = batch_executor + .finish_batch() .await - .context("Failed getting storage view cache")?; - updates_manager.finish_batch(finished_batch); - // this is needed for Basic Witness Input Producer to use in memory reads, but not database queries - updates_manager.update_storage_view_cache(storage_view_cache); - + .context("VM runner failed to execute batch tip")?; + let output = L1BatchOutput { + batch, + storage_view_cache: storage_view.cache(), + }; latency.observe(); output_handler - .handle_l1_batch(Arc::new(updates_manager)) + .handle_l1_batch(Arc::new(output)) .await .context("VM runner failed to handle L1 batch")?; Ok(()) @@ -178,16 +161,14 @@ impl VmRunner { tokio::time::sleep(SLEEP_INTERVAL).await; continue; }; - let updates_manager = - UpdatesManager::new(&batch_data.l1_batch_env, &batch_data.system_env); - let batch_executor = self.batch_processor.init_batch( + let batch_executor = self.batch_executor_factory.init_batch( storage, - batch_data.l1_batch_env, - batch_data.system_env, + batch_data.l1_batch_env.clone(), + batch_data.system_env.clone(), ); let output_handler = self 
.output_handler_factory - .create_handler(next_batch) + .create_handler(batch_data.system_env, batch_data.l1_batch_env) .await?; self.io @@ -196,7 +177,6 @@ impl VmRunner { let handle = tokio::task::spawn(Self::process_batch( batch_executor, batch_data.l2_blocks, - updates_manager, output_handler, )); task_handles.push((next_batch, handle)); diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index e351b09ad2bf..baee426007c5 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -9,13 +9,13 @@ use anyhow::Context as _; use async_trait::async_trait; use tokio::sync::{watch, RwLock}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_state::{ AsyncCatchupTask, BatchDiff, OwnedStorage, RocksdbCell, RocksdbStorage, RocksdbStorageBuilder, RocksdbWithMemory, }; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2ChainId}; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; +use zksync_vm_interface::{L1BatchEnv, SystemEnv}; use crate::{metrics::METRICS, VmRunnerIo}; @@ -37,6 +37,69 @@ pub trait StorageLoader: 'static + Send + Sync + fmt::Debug { ) -> anyhow::Result>; } +/// Simplified storage loader that always gets data from Postgres (i.e., doesn't do RocksDB caching). +#[derive(Debug)] +pub(crate) struct PostgresLoader { + pool: ConnectionPool, + l1_batch_params_provider: L1BatchParamsProvider, + chain_id: L2ChainId, + shadow_snapshots: bool, +} + +impl PostgresLoader { + pub async fn new(pool: ConnectionPool, chain_id: L2ChainId) -> anyhow::Result { + let mut l1_batch_params_provider = L1BatchParamsProvider::new(); + let mut conn = pool.connection().await?; + l1_batch_params_provider.initialize(&mut conn).await?; + Ok(Self { + pool, + l1_batch_params_provider, + chain_id, + shadow_snapshots: true, + }) + } + + /// Enables or disables snapshot storage shadowing. + pub fn shadow_snapshots(&mut self, shadow_snapshots: bool) { + self.shadow_snapshots = shadow_snapshots; + } +} + +#[async_trait] +impl StorageLoader for PostgresLoader { + #[tracing::instrument(skip_all, fields(l1_batch_number = l1_batch_number.0))] + async fn load_batch( + &self, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result> { + let mut conn = self.pool.connection().await?; + let Some(data) = load_batch_execute_data( + &mut conn, + l1_batch_number, + &self.l1_batch_params_provider, + self.chain_id, + ) + .await? + else { + return Ok(None); + }; + + if let Some(snapshot) = OwnedStorage::snapshot(&mut conn, l1_batch_number).await? { + let postgres = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; + let storage = snapshot.with_fallback(postgres, self.shadow_snapshots); + let storage = OwnedStorage::from(storage); + return Ok(Some((data, storage))); + } + + tracing::info!( + "Incomplete data to create storage snapshot for batch; will use sequential storage" + ); + let conn = self.pool.connection().await?; + let storage = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; + Ok(Some((data, storage.into()))) + } +} + /// Data needed to execute an L1 batch. 
#[derive(Debug, Clone)] pub struct BatchExecuteData { @@ -142,7 +205,7 @@ impl StorageLoader for VmRunnerStorage { return Ok(if let Some(data) = batch_data { let storage = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; - Some((data, storage)) + Some((data, storage.into())) } else { None }); diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index dd14e4dd1b0e..530016408140 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -3,30 +3,26 @@ use std::{collections::HashMap, ops, sync::Arc, time::Duration}; use async_trait::async_trait; use rand::{prelude::SliceRandom, Rng}; use tokio::sync::RwLock; -use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_multivm::interface::TransactionExecutionMetrics; +use zksync_node_genesis::GenesisParams; use zksync_node_test_utils::{ create_l1_batch_metadata, create_l2_block, execute_l2_transaction, l1_batch_metadata_to_commitment_artifacts, }; -use zksync_state::OwnedStorage; -use zksync_state_keeper::{StateKeeperOutputHandler, UpdatesManager}; use zksync_test_account::Account; use zksync_types::{ - block::{BlockGasCount, L1BatchHeader, L2BlockHasher}, + block::{L1BatchHeader, L2BlockHasher}, fee::Fee, get_intrinsic_constants, l2::L2Tx, utils::storage_key_for_standard_token_balance, - AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, - StorageKey, StorageLog, StorageLogKind, StorageValue, H160, H256, L2_BASE_TOKEN_ADDRESS, U256, + AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, + StorageLog, StorageLogKind, StorageValue, H160, H256, L2_BASE_TOKEN_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TransactionExecutionMetrics}; -use super::{BatchExecuteData, OutputHandlerFactory, VmRunnerIo}; -use crate::storage::{load_batch_execute_data, StorageLoader}; +use super::*; mod output_handler; mod playground; @@ -36,33 +32,6 @@ mod storage_writer; const TEST_TIMEOUT: Duration = Duration::from_secs(10); -/// Simplified storage loader that always gets data from Postgres (i.e., doesn't do RocksDB caching). -#[derive(Debug)] -struct PostgresLoader(ConnectionPool); - -#[async_trait] -impl StorageLoader for PostgresLoader { - async fn load_batch( - &self, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - let mut conn = self.0.connection().await?; - let Some(data) = load_batch_execute_data( - &mut conn, - l1_batch_number, - &L1BatchParamsProvider::new(), - L2ChainId::default(), - ) - .await? 
- else { - return Ok(None); - }; - - let storage = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; - Ok(Some((data, storage))) - } -} - #[derive(Debug, Default)] struct IoMock { current: L1BatchNumber, @@ -185,25 +154,27 @@ struct TestOutputFactory { impl OutputHandlerFactory for TestOutputFactory { async fn create_handler( &mut self, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + _system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { #[derive(Debug)] struct TestOutputHandler { delay: Option, } #[async_trait] - impl StateKeeperOutputHandler for TestOutputHandler { + impl OutputHandler for TestOutputHandler { async fn handle_l2_block( &mut self, - _updates_manager: &UpdatesManager, + _env: L2BlockEnv, + _output: &L2BlockOutput, ) -> anyhow::Result<()> { Ok(()) } async fn handle_l1_batch( - &mut self, - _updates_manager: Arc, + self: Box, + _output: Arc, ) -> anyhow::Result<()> { if let Some(delay) = self.delay { tokio::time::sleep(delay).await @@ -212,7 +183,7 @@ impl OutputHandlerFactory for TestOutputFactory { } } - let delay = self.delays.get(&l1_batch_number).copied(); + let delay = self.delays.get(&l1_batch_env.number).copied(); Ok(Box::new(TestOutputHandler { delay })) } } @@ -244,7 +215,7 @@ pub fn create_l2_transaction( async fn store_l1_batches( conn: &mut Connection<'_, Core>, numbers: ops::RangeInclusive, - contract_hashes: BaseSystemContractsHashes, + genesis_params: &GenesisParams, accounts: &mut [Account], ) -> anyhow::Result> { let mut rng = rand::thread_rng(); @@ -308,7 +279,7 @@ async fn store_l1_batches( digest.push_tx_hash(tx.hash()); new_l2_block.hash = digest.finalize(ProtocolVersionId::latest()); - new_l2_block.base_system_contracts_hashes = contract_hashes; + new_l2_block.base_system_contracts_hashes = genesis_params.base_system_contracts().hashes(); new_l2_block.l2_tx_count = 1; conn.blocks_dal().insert_l2_block(&new_l2_block).await?; last_l2_block_hash = new_l2_block.hash; @@ -337,20 +308,24 @@ async fn store_l1_batches( last_l2_block_hash = fictive_l2_block.hash; l2_block_number += 1; - let header = L1BatchHeader::new( + let mut header = L1BatchHeader::new( l1_batch_number, l2_block_number.0 as u64 - 2, // Matches the first L2 block in the batch - BaseSystemContractsHashes::default(), + genesis_params.base_system_contracts().hashes(), ProtocolVersionId::default(), ); - let predicted_gas = BlockGasCount { - commit: 2, - prove: 3, - execute: 10, - }; - conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) - .await?; + + // Conservatively assume that the bootloader / transactions touch *all* system contracts + default AA. + // By convention, bootloader hash isn't included into `used_contract_hashes`. 
+ header.used_contract_hashes = genesis_params + .system_contracts() + .iter() + .map(|contract| hash_bytecode(&contract.bytecode)) + .chain([genesis_params.base_system_contracts().hashes().default_aa]) + .map(h256_to_u256) + .collect(); + + conn.blocks_dal().insert_mock_l1_batch(&header).await?; conn.blocks_dal() .mark_l2_blocks_as_executed_in_l1_batch(l1_batch_number) .await?; diff --git a/core/node/vm_runner/src/tests/output_handler.rs b/core/node/vm_runner/src/tests/output_handler.rs index 453507328c4f..1bf30effdbe5 100644 --- a/core/node/vm_runner/src/tests/output_handler.rs +++ b/core/node/vm_runner/src/tests/output_handler.rs @@ -6,13 +6,13 @@ use tokio::{ }; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::{ConnectionPool, Core}; -use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; -use zksync_state_keeper::UpdatesManager; +use zksync_state::interface::StorageViewCache; use zksync_types::L1BatchNumber; +use zksync_vm_interface::{FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use crate::{ tests::{wait, IoMock, TestOutputFactory}, - ConcurrentOutputHandlerFactory, OutputHandlerFactory, + ConcurrentOutputHandlerFactory, L1BatchOutput, L2BlockOutput, OutputHandlerFactory, }; struct OutputHandlerTester { @@ -40,47 +40,53 @@ impl OutputHandlerTester { } async fn spawn_test_task(&mut self, l1_batch_number: L1BatchNumber) -> anyhow::Result<()> { - let mut output_handler = self.output_factory.create_handler(l1_batch_number).await?; - let join_handle = tokio::task::spawn(async move { - let l1_batch_env = L1BatchEnv { - previous_batch_hash: None, - number: Default::default(), + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: l1_batch_number, + timestamp: 0, + fee_input: Default::default(), + fee_account: Default::default(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 0, timestamp: 0, - fee_input: Default::default(), - fee_account: Default::default(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 0, - timestamp: 0, - prev_block_hash: Default::default(), - max_virtual_blocks_to_create: 0, + prev_block_hash: Default::default(), + max_virtual_blocks_to_create: 0, + }, + }; + let system_env = SystemEnv { + zk_porter_available: false, + version: Default::default(), + base_system_smart_contracts: BaseSystemContracts { + bootloader: SystemContractCode { + code: vec![], + hash: Default::default(), }, - }; - let system_env = SystemEnv { - zk_porter_available: false, - version: Default::default(), - base_system_smart_contracts: BaseSystemContracts { - bootloader: SystemContractCode { - code: vec![], - hash: Default::default(), - }, - default_aa: SystemContractCode { - code: vec![], - hash: Default::default(), - }, + default_aa: SystemContractCode { + code: vec![], + hash: Default::default(), }, - bootloader_gas_limit: 0, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: 0, - chain_id: Default::default(), - }; - let updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + }, + bootloader_gas_limit: 0, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: 0, + chain_id: Default::default(), + }; + + let mut output_handler = self + .output_factory + .create_handler(system_env, l1_batch_env.clone()) + .await?; + let join_handle = tokio::task::spawn(async move { output_handler - .handle_l2_block(&updates_manager) + 
.handle_l2_block(l1_batch_env.first_l2_block, &L2BlockOutput::default()) .await .unwrap(); output_handler - .handle_l1_batch(Arc::new(updates_manager)) + .handle_l1_batch(Arc::new(L1BatchOutput { + batch: FinishedL1Batch::mock(), + storage_view_cache: StorageViewCache::default(), + })) .await .unwrap(); }); diff --git a/core/node/vm_runner/src/tests/playground.rs b/core/node/vm_runner/src/tests/playground.rs index 2f3caf1f85c7..aaaf4b45b1a4 100644 --- a/core/node/vm_runner/src/tests/playground.rs +++ b/core/node/vm_runner/src/tests/playground.rs @@ -8,9 +8,21 @@ use zksync_state::RocksdbStorage; use zksync_types::vm::FastVmMode; use super::*; -use crate::impls::{VmPlayground, VmPlaygroundCursorOptions, VmPlaygroundTasks}; +use crate::impls::{ + VmPlayground, VmPlaygroundCursorOptions, VmPlaygroundStorageOptions, VmPlaygroundTasks, +}; -async fn setup_storage(pool: &ConnectionPool, batch_count: u32) -> GenesisParams { +impl From<&tempfile::TempDir> for VmPlaygroundStorageOptions { + fn from(dir: &tempfile::TempDir) -> Self { + Self::Rocksdb(dir.path().to_str().unwrap().into()) + } +} + +async fn setup_storage( + pool: &ConnectionPool, + batch_count: u32, + insert_protective_reads: bool, +) -> GenesisParams { let mut conn = pool.connection().await.unwrap(); let genesis_params = GenesisParams::mock(); if !conn.blocks_dal().is_genesis_needed().await.unwrap() { @@ -24,35 +36,46 @@ async fn setup_storage(pool: &ConnectionPool, batch_count: u32) -> Genesis // Generate some batches and persist them in Postgres let mut accounts = [Account::random()]; fund(&mut conn, &accounts).await; - store_l1_batches( - &mut conn, - 1..=batch_count, - genesis_params.base_system_contracts().hashes(), - &mut accounts, - ) - .await - .unwrap(); + store_l1_batches(&mut conn, 1..=batch_count, &genesis_params, &mut accounts) + .await + .unwrap(); // Fill in missing storage logs for all batches so that running VM for all of them works correctly. 
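Pulling the reworked handler contract into one place, reconstructed from the implementations and call sites above (a sketch: trait bounds, the exact module location, and the imports of `L2BlockEnv`, `L2BlockOutput`, and `L1BatchOutput` are simplified):

```rust
use std::sync::Arc;

use async_trait::async_trait;

// Per-block output is accumulated into an `L2BlockOutput` and handed over after each
// block; batch-level output is handled exactly once, which is why `handle_l1_batch`
// consumes the handler via `self: Box<Self>`.
#[async_trait]
trait OutputHandler: Send {
    async fn handle_l2_block(
        &mut self,
        env: L2BlockEnv,
        output: &L2BlockOutput,
    ) -> anyhow::Result<()>;

    async fn handle_l1_batch(self: Box<Self>, output: Arc<L1BatchOutput>) -> anyhow::Result<()>;
}
```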
- storage_writer::write_storage_logs(pool.clone()).await; + storage_writer::write_storage_logs(pool.clone(), insert_protective_reads).await; genesis_params } +#[derive(Debug, Clone, Copy)] +enum StorageLoaderKind { + Cached, + Postgres, + Snapshot, +} + +impl StorageLoaderKind { + const ALL: [Self; 3] = [Self::Cached, Self::Postgres, Self::Snapshot]; +} + async fn run_playground( pool: ConnectionPool, - rocksdb_dir: &tempfile::TempDir, + storage: VmPlaygroundStorageOptions, reset_to: Option, ) { - let genesis_params = setup_storage(&pool, 5).await; + let insert_protective_reads = matches!( + storage, + VmPlaygroundStorageOptions::Snapshots { shadow: true } + ); + let genesis_params = setup_storage(&pool, 5, insert_protective_reads).await; let cursor = VmPlaygroundCursorOptions { first_processed_batch: reset_to.unwrap_or(L1BatchNumber(0)), window_size: NonZeroU32::new(1).unwrap(), reset_state: reset_to.is_some(), }; + let (playground, playground_tasks) = VmPlayground::new( pool.clone(), FastVmMode::Shadow, - rocksdb_dir.path().to_str().unwrap().to_owned(), + storage, genesis_params.config().l2_chain_id, cursor, ) @@ -91,15 +114,17 @@ async fn wait_for_all_batches( let playground_io = playground.io().clone(); let mut completed_batches = playground_io.subscribe_to_completed_batches(); - let task_handles = [ - tokio::spawn(playground_tasks.loader_task.run(stop_receiver.clone())), + let mut task_handles = vec![ tokio::spawn( playground_tasks .output_handler_factory_task .run(stop_receiver.clone()), ), - tokio::spawn(async move { playground.run(&stop_receiver).await }), + tokio::spawn(playground.run(stop_receiver.clone())), ]; + if let Some(loader_task) = playground_tasks.loader_task { + task_handles.push(tokio::spawn(loader_task.run(stop_receiver))); + } // Wait until all batches are processed. 
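The `StorageLoaderKind` cases map onto storage options in `starting_from_non_zero_batch` further below; spelled out as a helper with the non-obvious case called out (a sketch; the RocksDB path is illustrative, the tests use a `TempDir`):

```rust
fn storage_options_for(kind: StorageLoaderKind) -> VmPlaygroundStorageOptions {
    match kind {
        // RocksDB-cached storage; spawns the async catch-up / loader task.
        StorageLoaderKind::Cached => {
            VmPlaygroundStorageOptions::Rocksdb("/tmp/vm-playground".into())
        }
        // Note: the Postgres kind also uses `Snapshots`. These runs don't persist
        // protective reads, so snapshot creation bails out and the loader falls back
        // to sequential Postgres storage (cf. `PostgresLoader::load_batch` above).
        StorageLoaderKind::Postgres => VmPlaygroundStorageOptions::Snapshots { shadow: false },
        // Snapshot storage, cross-checked ("shadowed") against Postgres on reads.
        StorageLoaderKind::Snapshot => VmPlaygroundStorageOptions::Snapshots { shadow: true },
    }
}
```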
let last_batch_number = conn @@ -149,14 +174,40 @@ async fn wait_for_all_batches( async fn vm_playground_basics(reset_state: bool) { let pool = ConnectionPool::test_pool().await; let rocksdb_dir = tempfile::TempDir::new().unwrap(); - run_playground(pool, &rocksdb_dir, reset_state.then_some(L1BatchNumber(0))).await; + run_playground( + pool, + VmPlaygroundStorageOptions::from(&rocksdb_dir), + reset_state.then_some(L1BatchNumber(0)), + ) + .await; } +#[test_casing(2, [false, true])] #[tokio::test] -async fn starting_from_non_zero_batch() { +async fn vm_playground_basics_without_cache(reset_state: bool) { let pool = ConnectionPool::test_pool().await; - let rocksdb_dir = tempfile::TempDir::new().unwrap(); - run_playground(pool, &rocksdb_dir, Some(L1BatchNumber(3))).await; + run_playground( + pool, + VmPlaygroundStorageOptions::Snapshots { shadow: false }, + reset_state.then_some(L1BatchNumber(0)), + ) + .await; +} + +#[test_casing(3, StorageLoaderKind::ALL)] +#[tokio::test] +async fn starting_from_non_zero_batch(storage_loader_kind: StorageLoaderKind) { + let pool = ConnectionPool::test_pool().await; + let rocksdb_dir; + let storage_loader = match storage_loader_kind { + StorageLoaderKind::Cached => { + rocksdb_dir = tempfile::TempDir::new().unwrap(); + VmPlaygroundStorageOptions::from(&rocksdb_dir) + } + StorageLoaderKind::Postgres => VmPlaygroundStorageOptions::Snapshots { shadow: false }, + StorageLoaderKind::Snapshot => VmPlaygroundStorageOptions::Snapshots { shadow: true }, + }; + run_playground(pool, storage_loader, Some(L1BatchNumber(3))).await; } #[test_casing(2, [L1BatchNumber(0), L1BatchNumber(2)])] @@ -164,7 +215,12 @@ async fn starting_from_non_zero_batch() { async fn resetting_playground_state(reset_to: L1BatchNumber) { let pool = ConnectionPool::test_pool().await; let rocksdb_dir = tempfile::TempDir::new().unwrap(); - run_playground(pool.clone(), &rocksdb_dir, None).await; + run_playground( + pool.clone(), + VmPlaygroundStorageOptions::from(&rocksdb_dir), + None, + ) + .await; // Manually catch up RocksDB to Postgres to ensure that resetting it is not trivial. 
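On the parameterization above: `#[test_casing(n, cases)]` expands a single parameterized test into `n` test functions, one per case. Roughly equivalent hand-written tests (the generated names are illustrative, not the crate's actual naming scheme):

```rust
#[tokio::test]
async fn starting_from_non_zero_batch_cached() {
    starting_from_non_zero_batch(StorageLoaderKind::Cached).await;
}

#[tokio::test]
async fn starting_from_non_zero_batch_postgres() {
    starting_from_non_zero_batch(StorageLoaderKind::Postgres).await;
}

#[tokio::test]
async fn starting_from_non_zero_batch_snapshot() {
    starting_from_non_zero_batch(StorageLoaderKind::Snapshot).await;
}
```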
let (_stop_sender, stop_receiver) = watch::channel(false); @@ -176,7 +232,12 @@ async fn resetting_playground_state(reset_to: L1BatchNumber) { .await .unwrap(); - run_playground(pool.clone(), &rocksdb_dir, Some(reset_to)).await; + run_playground( + pool.clone(), + VmPlaygroundStorageOptions::from(&rocksdb_dir), + Some(reset_to), + ) + .await; } #[test_casing(2, [2, 3])] @@ -186,7 +247,7 @@ async fn using_larger_window_size(window_size: u32) { let pool = ConnectionPool::test_pool().await; let rocksdb_dir = tempfile::TempDir::new().unwrap(); - let genesis_params = setup_storage(&pool, 5).await; + let genesis_params = setup_storage(&pool, 5, false).await; let cursor = VmPlaygroundCursorOptions { first_processed_batch: L1BatchNumber(0), window_size: NonZeroU32::new(window_size).unwrap(), @@ -195,7 +256,7 @@ async fn using_larger_window_size(window_size: u32) { let (playground, playground_tasks) = VmPlayground::new( pool.clone(), FastVmMode::Shadow, - rocksdb_dir.path().to_str().unwrap().to_owned(), + VmPlaygroundStorageOptions::from(&rocksdb_dir), genesis_params.config().l2_chain_id, cursor, ) diff --git a/core/node/vm_runner/src/tests/process.rs b/core/node/vm_runner/src/tests/process.rs index 7ea1335db71f..fec3fd2ba60a 100644 --- a/core/node/vm_runner/src/tests/process.rs +++ b/core/node/vm_runner/src/tests/process.rs @@ -5,9 +5,9 @@ use test_casing::test_casing; use tokio::sync::{watch, RwLock}; use zksync_dal::{ConnectionPool, Core}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_state_keeper::MainBatchExecutor; use zksync_test_account::Account; use zksync_types::{L1BatchNumber, L2ChainId}; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use super::*; use crate::{ConcurrentOutputHandlerFactory, VmRunner, VmRunnerStorage}; @@ -25,17 +25,11 @@ async fn process_batches((batch_count, window): (u32, u32)) -> anyhow::Result<() let mut accounts = vec![Account::random(), Account::random()]; fund(&mut conn, &accounts).await; - store_l1_batches( - &mut conn, - 1..=batch_count, - genesis_params.base_system_contracts().hashes(), - &mut accounts, - ) - .await?; + store_l1_batches(&mut conn, 1..=batch_count, &genesis_params, &mut accounts).await?; drop(conn); // Fill in missing storage logs for all batches so that running VM for all of them works correctly. 
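Executor construction now sits behind a factory, so `VmRunner` can initialize one executor per batch itself instead of being handed a single pre-built executor. The wiring, condensed from the test that follows (a sketch: trait-object signatures are abbreviated, and the two boolean flags are assumed to mean call-trace saving and optional bytecode compression, per their usage elsewhere in the codebase):

```rust
use std::sync::Arc;

use zksync_dal::{ConnectionPool, Core};
use zksync_vm_executor::batch::MainBatchExecutorFactory;

fn build_vm_runner(
    pool: ConnectionPool<Core>,
    io: Box<dyn VmRunnerIo>,
    loader: Arc<dyn StorageLoader>,
    output_factory: Box<dyn OutputHandlerFactory>,
) -> VmRunner {
    // No call traces, no optional bytecode compression.
    let batch_executor = MainBatchExecutorFactory::new(false, false);
    VmRunner::new(pool, io, loader, output_factory, Box::new(batch_executor))
}
```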
- storage_writer::write_storage_logs(connection_pool.clone()).await; + storage_writer::write_storage_logs(connection_pool.clone(), true).await; let io = Arc::new(RwLock::new(IoMock { current: 0.into(), @@ -60,7 +54,7 @@ async fn process_batches((batch_count, window): (u32, u32)) -> anyhow::Result<() tokio::task::spawn(async move { task.run(output_stop_receiver).await.unwrap() }); let storage = Arc::new(storage); - let batch_executor = MainBatchExecutor::new(false, false); + let batch_executor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( connection_pool, Box::new(io.clone()), diff --git a/core/node/vm_runner/src/tests/storage.rs b/core/node/vm_runner/src/tests/storage.rs index f6f7a2ba9e64..838b469f0ef3 100644 --- a/core/node/vm_runner/src/tests/storage.rs +++ b/core/node/vm_runner/src/tests/storage.rs @@ -115,7 +115,7 @@ async fn rerun_storage_on_existing_data() -> anyhow::Result<()> { let batches = store_l1_batches( &mut connection_pool.connection().await?, 1..=10, - genesis_params.base_system_contracts().hashes(), + &genesis_params, &mut accounts, ) .await?; @@ -212,7 +212,7 @@ async fn continuously_load_new_batches() -> anyhow::Result<()> { store_l1_batches( &mut connection_pool.connection().await?, 1..=1, - genesis_params.base_system_contracts().hashes(), + &genesis_params, &mut accounts, ) .await?; @@ -230,7 +230,7 @@ async fn continuously_load_new_batches() -> anyhow::Result<()> { store_l1_batches( &mut connection_pool.connection().await?, 2..=2, - genesis_params.base_system_contracts().hashes(), + &genesis_params, &mut accounts, ) .await?; @@ -266,7 +266,7 @@ async fn access_vm_runner_storage() -> anyhow::Result<()> { store_l1_batches( &mut connection_pool.connection().await?, batch_range, - genesis_params.base_system_contracts().hashes(), + &genesis_params, &mut accounts, ) .await?; diff --git a/core/node/vm_runner/src/tests/storage_writer.rs b/core/node/vm_runner/src/tests/storage_writer.rs index 4c7a6e0d6612..76d0867125a8 100644 --- a/core/node/vm_runner/src/tests/storage_writer.rs +++ b/core/node/vm_runner/src/tests/storage_writer.rs @@ -1,14 +1,23 @@ +use assert_matches::assert_matches; +use test_casing::test_casing; use tokio::sync::watch; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_state_keeper::MainBatchExecutor; +use zksync_state::OwnedStorage; +use zksync_types::{L2ChainId, StorageLogWithPreviousValue}; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use super::*; -use crate::{ConcurrentOutputHandlerFactory, VmRunner}; +use crate::{ + storage::{PostgresLoader, StorageLoader}, + ConcurrentOutputHandlerFactory, L1BatchOutput, L2BlockOutput, OutputHandler, VmRunner, +}; #[derive(Debug, Clone)] struct StorageWriterIo { + last_processed_block: L2BlockNumber, last_processed_batch: Arc>, pool: ConnectionPool, + insert_protective_reads: bool, } impl StorageWriterIo { @@ -64,43 +73,43 @@ impl VmRunnerIo for StorageWriterIo { impl StorageWriterIo { async fn write_storage_logs( conn: &mut Connection<'_, Core>, - updates_manager: &UpdatesManager, + block_number: L2BlockNumber, + storage_logs: impl Iterator, ) -> anyhow::Result<()> { - let storage_logs = updates_manager - .l2_block - .storage_logs - .iter() - .filter_map(|log| log.log.is_write().then_some(log.log)); + let storage_logs = storage_logs.filter_map(|log| log.log.is_write().then_some(log.log)); let storage_logs: Vec<_> = storage_logs.collect(); conn.storage_logs_dal() - .append_storage_logs(updates_manager.l2_block.number, &storage_logs) + 
.append_storage_logs(block_number, &storage_logs) .await?; Ok(()) } } #[async_trait] -impl StateKeeperOutputHandler for StorageWriterIo { - async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { +impl OutputHandler for StorageWriterIo { + async fn handle_l2_block( + &mut self, + env: L2BlockEnv, + output: &L2BlockOutput, + ) -> anyhow::Result<()> { let mut conn = self.pool.connection().await?; - Self::write_storage_logs(&mut conn, updates_manager).await?; + let storage_logs = output + .transactions + .iter() + .flat_map(|(_, exec_result)| &exec_result.tx_result.logs.storage_logs); + let block_number = L2BlockNumber(env.number); + Self::write_storage_logs(&mut conn, block_number, storage_logs).await?; + self.last_processed_block = block_number; Ok(()) } - async fn handle_l1_batch( - &mut self, - updates_manager: Arc, - ) -> anyhow::Result<()> { + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { let mut conn = self.pool.connection().await?; // Storage logs are added to the fictive block *after* `handle_l2_block()` is called for it, so we need to call it again here. - Self::write_storage_logs(&mut conn, &updates_manager).await?; - - let finished_batch = updates_manager - .l1_batch - .finished - .as_ref() - .expect("L1 batch is not finished"); - let state_diffs = finished_batch.state_diffs.as_ref().expect("no state diffs"); + let storage_logs = &output.batch.block_tip_execution_result.logs.storage_logs; + Self::write_storage_logs(&mut conn, self.last_processed_block, storage_logs.iter()).await?; + + let state_diffs = output.batch.state_diffs.as_ref().expect("no state diffs"); let initial_writes: Vec<_> = state_diffs .iter() .filter(|diff| diff.is_write_initial()) @@ -111,12 +120,26 @@ impl StateKeeperOutputHandler for StorageWriterIo { )) }) .collect(); + let l1_batch_number = *self.last_processed_batch.borrow() + 1; conn.storage_logs_dedup_dal() - .insert_initial_writes(updates_manager.l1_batch.number, &initial_writes) + .insert_initial_writes(l1_batch_number, &initial_writes) .await?; - self.last_processed_batch - .send_replace(updates_manager.l1_batch.number); + if self.insert_protective_reads { + let protective_reads: Vec<_> = output + .batch + .final_execution_state + .deduplicated_storage_logs + .iter() + .filter(|log_query| !log_query.is_write()) + .copied() + .collect(); + conn.storage_logs_dedup_dal() + .insert_protective_reads(l1_batch_number, &protective_reads) + .await?; + } + + self.last_processed_batch.send_replace(l1_batch_number); Ok(()) } } @@ -125,16 +148,17 @@ impl StateKeeperOutputHandler for StorageWriterIo { impl OutputHandlerFactory for StorageWriterIo { async fn create_handler( &mut self, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - assert_eq!(l1_batch_number, self.batch() + 1); + _system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { + assert_eq!(l1_batch_env.number, self.batch() + 1); Ok(Box::new(self.clone())) } } /// Writes missing storage logs into Postgres by executing all transactions from it. Useful both for testing `VmRunner`, /// and to fill the storage for multi-batch tests for other components. 
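`StorageWriterIo` reports progress through a `tokio::sync::watch` channel: `send_replace` bumps the shared batch counter (as above), and readers `subscribe` and await changes (as below). A standalone sketch of the same pattern:

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (sender, _initial_rx) = watch::channel(0_u32);
    let mut progress = sender.subscribe();

    let writer = tokio::spawn(async move {
        for batch in 1..=5_u32 {
            // ... persist storage logs / initial writes for `batch` here ...
            sender.send_replace(batch);
        }
    });

    // Reader: block until batch 5 is fully persisted. Intermediate values may
    // coalesce; a watch channel only retains the latest value.
    while *progress.borrow_and_update() < 5 {
        progress.changed().await.unwrap();
    }
    writer.await.unwrap();
}
```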
-pub(super) async fn write_storage_logs(pool: ConnectionPool) { +pub(super) async fn write_storage_logs(pool: ConnectionPool, insert_protective_reads: bool) { let mut conn = pool.connection().await.unwrap(); let sealed_batch = conn .blocks_dal() @@ -145,13 +169,18 @@ pub(super) async fn write_storage_logs(pool: ConnectionPool) { drop(conn); let io = Box::new(StorageWriterIo { last_processed_batch: Arc::new(watch::channel(L1BatchNumber(0)).0), + last_processed_block: L2BlockNumber(0), pool: pool.clone(), + insert_protective_reads, }); let mut processed_batch = io.last_processed_batch.subscribe(); - let loader = Arc::new(PostgresLoader(pool.clone())); - let batch_executor = Box::new(MainBatchExecutor::new(false, false)); - let vm_runner = VmRunner::new(pool, io.clone(), loader, io, batch_executor); + let loader = PostgresLoader::new(pool.clone(), L2ChainId::default()) + .await + .unwrap(); + let loader = Arc::new(loader); + let batch_executor = MainBatchExecutorFactory::new(false, false); + let vm_runner = VmRunner::new(pool, io.clone(), loader, io, Box::new(batch_executor)); let (stop_sender, stop_receiver) = watch::channel(false); let vm_runner_handle = tokio::spawn(async move { vm_runner.run(&stop_receiver).await }); @@ -163,8 +192,9 @@ pub(super) async fn write_storage_logs(pool: ConnectionPool) { vm_runner_handle.await.unwrap().unwrap(); } +#[test_casing(2, [false, true])] #[tokio::test] -async fn storage_writer_works() { +async fn storage_writer_works(insert_protective_reads: bool) { let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); let genesis_params = GenesisParams::mock(); @@ -174,17 +204,12 @@ async fn storage_writer_works() { let mut accounts = [Account::random()]; fund(&mut conn, &accounts).await; - store_l1_batches( - &mut conn, - 1..=5, - genesis_params.base_system_contracts().hashes(), - &mut accounts, - ) - .await - .unwrap(); + store_l1_batches(&mut conn, 1..=5, &genesis_params, &mut accounts) + .await + .unwrap(); drop(conn); - write_storage_logs(pool.clone()).await; + write_storage_logs(pool.clone(), insert_protective_reads).await; // Re-run the VM on all batches to check that storage logs are persisted correctly let (stop_sender, stop_receiver) = watch::channel(false); @@ -192,17 +217,33 @@ async fn storage_writer_works() { current: L1BatchNumber(0), max: 5, })); - let loader = Arc::new(PostgresLoader(pool.clone())); + let loader = PostgresLoader::new(pool.clone(), genesis_params.config().l2_chain_id) + .await + .unwrap(); + let loader = Arc::new(loader); + + // Check that the loader returns expected types of storage. 
+ let (_, batch_storage) = loader + .load_batch(L1BatchNumber(1)) + .await + .unwrap() + .expect("no batch loaded"); + if insert_protective_reads { + assert_matches!(batch_storage, OwnedStorage::Snapshot(_)); + } else { + assert_matches!(batch_storage, OwnedStorage::Postgres(_)); + } + let (output_factory, output_factory_task) = ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), TestOutputFactory::default()); let output_factory_handle = tokio::spawn(output_factory_task.run(stop_receiver.clone())); - let batch_executor = Box::new(MainBatchExecutor::new(false, false)); + let batch_executor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( pool, Box::new(io.clone()), loader, Box::new(output_factory), - batch_executor, + Box::new(batch_executor), ); let vm_runner_handle = tokio::spawn(async move { vm_runner.run(&stop_receiver).await }); diff --git a/core/tests/loadnext/src/account/mod.rs b/core/tests/loadnext/src/account/mod.rs index 5dcd5167165e..0f418bf12676 100644 --- a/core/tests/loadnext/src/account/mod.rs +++ b/core/tests/loadnext/src/account/mod.rs @@ -5,6 +5,7 @@ use std::{ }; use futures::{channel::mpsc, SinkExt}; +use rand::Rng; use tokio::sync::RwLock; use zksync_contracts::test_contracts::LoadnextContractExecutionParams; use zksync_types::{api::TransactionReceipt, Address, Nonce, H256, U256, U64}; @@ -75,6 +76,8 @@ pub struct AccountLifespan { inflight_txs: VecDeque, /// Current account nonce, it is None at the beginning and will be set after the first transaction current_nonce: Option, + /// Randomly assigned polling interval. + polling_interval: Duration, } impl AccountLifespan { @@ -82,11 +85,12 @@ impl AccountLifespan { config: &LoadtestConfig, contract_execution_params: LoadnextContractExecutionParams, addresses: AddressPool, - test_account: TestWallet, + mut test_account: TestWallet, report_sink: mpsc::Sender, main_l2_token: Address, paymaster_address: Address, ) -> Self { + let polling_interval = test_account.rng.gen_range(POLLING_INTERVAL); Self { wallet: test_account, config: config.clone(), @@ -99,6 +103,7 @@ impl AccountLifespan { report_sink, inflight_txs: Default::default(), current_nonce: None, + polling_interval, } } @@ -132,7 +137,7 @@ impl AccountLifespan { self.execute_command(deploy_command.clone()).await?; self.wait_for_all_inflight_tx().await?; - let mut timer = tokio::time::interval(POLLING_INTERVAL); + let mut timer = tokio::time::interval(self.polling_interval); let mut l1_tx_count = 0; loop { let command = self.generate_command(); @@ -157,7 +162,7 @@ impl AccountLifespan { } async fn wait_for_all_inflight_tx(&mut self) -> Result<(), Aborted> { - let mut timer = tokio::time::interval(POLLING_INTERVAL); + let mut timer = tokio::time::interval(self.polling_interval); while !self.inflight_txs.is_empty() { timer.tick().await; self.check_inflight_txs().await?; diff --git a/core/tests/loadnext/src/account_pool.rs b/core/tests/loadnext/src/account_pool.rs index 7b5e277e139b..3fa3141553cd 100644 --- a/core/tests/loadnext/src/account_pool.rs +++ b/core/tests/loadnext/src/account_pool.rs @@ -101,7 +101,9 @@ impl AccountPool { .context("invalid L2 RPC URL")?, )? .for_network(l2_chain_id.into()) + .report_config(false) .build(); + // Perform a health check: check whether ZKsync server is alive. 
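Context for the loadnext polling change above: `POLLING_INTERVAL` becomes a range (redefined in `constants.rs` below), each account samples a fixed interval from it once at construction, which spreads API polling across many accounts, while SDK operation handles keep a deterministic upper bound via `POLLING_INTERVAL.end`. Self-contained sketch of the sampling:

```rust
use std::{ops, time::Duration};

use rand::Rng;

const POLLING_INTERVAL: ops::Range<Duration> = Duration::from_secs(2)..Duration::from_secs(3);

// Each account gets its own fixed interval, sampled uniformly from the range
// (`Duration` is uniformly sampleable in rand 0.8).
fn sampled_interval(rng: &mut impl Rng) -> Duration {
    rng.gen_range(POLLING_INTERVAL)
}

fn main() {
    let interval = sampled_interval(&mut rand::thread_rng());
    println!("this account polls every {interval:?}");
}
```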
let mut server_alive = false; for _ in 0usize..3 { diff --git a/core/tests/loadnext/src/config.rs b/core/tests/loadnext/src/config.rs index a9648edb00ae..ab578ecfdc6b 100644 --- a/core/tests/loadnext/src/config.rs +++ b/core/tests/loadnext/src/config.rs @@ -4,7 +4,7 @@ use serde::Deserialize; use tokio::sync::Semaphore; use zksync_contracts::test_contracts::LoadnextContractExecutionParams; use zksync_types::{network::Network, Address, L2ChainId, H160}; -use zksync_utils::workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; use crate::fs_utils::read_tokens; @@ -190,7 +190,7 @@ fn default_main_token() -> H160 { } fn default_test_contracts_path() -> PathBuf { - let test_contracts_path = workspace_dir_or_current_dir().join("etc/contracts-test-data"); + let test_contracts_path = Workspace::locate().core().join("etc/contracts-test-data"); tracing::info!("Test contracts path: {}", test_contracts_path.display()); test_contracts_path } diff --git a/core/tests/loadnext/src/constants.rs b/core/tests/loadnext/src/constants.rs index 7ac66ab7e1e7..6b989b16feb1 100644 --- a/core/tests/loadnext/src/constants.rs +++ b/core/tests/loadnext/src/constants.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{ops, time::Duration}; /// Normally, block is committed on Ethereum every 15 seconds; however there are no guarantees that our transaction /// will be included in the next block right after sending. @@ -14,7 +14,7 @@ pub const ETH_POLLING_INTERVAL: Duration = Duration::from_secs(10); pub const COMMIT_TIMEOUT: Duration = Duration::from_secs(600); /// We don't want to overload the server with too many requests; given the fact that blocks are expected to be created /// every couple of seconds, chosen value seems to be adequate to provide the result in one or two calls at average. -pub const POLLING_INTERVAL: Duration = Duration::from_secs(3); +pub const POLLING_INTERVAL: ops::Range = Duration::from_secs(2)..Duration::from_secs(3); pub const MAX_OUTSTANDING_NONCE: usize = 20; diff --git a/core/tests/loadnext/src/executor.rs b/core/tests/loadnext/src/executor.rs index a573583ed318..43a1be164b64 100644 --- a/core/tests/loadnext/src/executor.rs +++ b/core/tests/loadnext/src/executor.rs @@ -244,7 +244,7 @@ impl Executor { }); priority_op_handle - .polling_interval(POLLING_INTERVAL) + .polling_interval(POLLING_INTERVAL.end) .unwrap(); priority_op_handle .commit_timeout(COMMIT_TIMEOUT) @@ -313,7 +313,7 @@ impl Executor { }); priority_op_handle - .polling_interval(POLLING_INTERVAL) + .polling_interval(POLLING_INTERVAL.end) .unwrap(); priority_op_handle .commit_timeout(COMMIT_TIMEOUT) @@ -463,7 +463,7 @@ impl Executor { // Wait for transactions to be committed, if at least one of them fails, // return error. for mut handle in handles { - handle.polling_interval(POLLING_INTERVAL).unwrap(); + handle.polling_interval(POLLING_INTERVAL.end).unwrap(); let result = handle .commit_timeout(COMMIT_TIMEOUT) diff --git a/core/tests/loadnext/src/fs_utils.rs b/core/tests/loadnext/src/fs_utils.rs index 8af9df8afee7..c4472a00531c 100644 --- a/core/tests/loadnext/src/fs_utils.rs +++ b/core/tests/loadnext/src/fs_utils.rs @@ -5,7 +5,7 @@ use std::{fs::File, io::BufReader, path::Path}; use serde::Deserialize; use zksync_types::{ethabi::Contract, network::Network, Address}; -use zksync_utils::workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; /// A token stored in `etc/tokens/{network}.json` files. 
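The path-resolution swap above recurs across the crate (again in `fs_utils.rs` below): `Workspace::locate()` inspects the environment once and exposes typed workspace roots. A sketch of the call pattern (only `.core()` appears in this diff; other accessors are not assumed, and the fallback behavior is assumed to match the old `workspace_dir_or_current_dir()` helper):

```rust
use std::path::PathBuf;

use zksync_utils::env::Workspace;

// Locate the repo layout, then resolve paths relative to the core workspace root.
fn test_contracts_dir() -> PathBuf {
    Workspace::locate().core().join("etc/contracts-test-data")
}
```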
#[derive(Debug, Deserialize)] @@ -27,7 +27,7 @@ pub struct TestContract { } pub fn read_tokens(network: Network) -> anyhow::Result> { - let home = workspace_dir_or_current_dir(); + let home = Workspace::locate().core(); let path = home.join(format!("etc/tokens/{network}.json")); let file = File::open(path)?; let reader = BufReader::new(file); diff --git a/core/tests/loadnext/src/sdk/ethereum/mod.rs b/core/tests/loadnext/src/sdk/ethereum/mod.rs index 4b7bb00a3080..4557c2c43200 100644 --- a/core/tests/loadnext/src/sdk/ethereum/mod.rs +++ b/core/tests/loadnext/src/sdk/ethereum/mod.rs @@ -102,6 +102,7 @@ impl EthereumProvider { let query_client = Client::http(eth_web3_url) .map_err(|err| ClientError::NetworkError(err.to_string()))? .for_network(sl_chain_id.into()) + .report_config(false) .build(); let query_client: Box> = Box::new(query_client); let eth_client = SigningClient::new( diff --git a/core/tests/loadnext/src/sdk/wallet.rs b/core/tests/loadnext/src/sdk/wallet.rs index 9d3bd73a9bf2..551d0d8e385f 100644 --- a/core/tests/loadnext/src/sdk/wallet.rs +++ b/core/tests/loadnext/src/sdk/wallet.rs @@ -45,6 +45,7 @@ where let client = Client::http(rpc_address) .map_err(|err| ClientError::NetworkError(err.to_string()))? .for_network(signer.chain_id.into()) + .report_config(false) .build(); Ok(Wallet { diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index 5fbac69ace6e..6599e7c5d298 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -11,6 +11,7 @@ import * as ethers from 'ethers'; import path from 'node:path'; import { expect } from 'chai'; import { runExternalNodeInBackground } from './utils'; +import { killPidWithAllChilds } from 'utils/build/kill'; export interface Health { readonly status: string; @@ -83,9 +84,11 @@ export async function getExternalNodeHealth(url: string) { } } -export async function dropNodeData(useZkSupervisor: boolean, env: { [key: string]: string }) { +export async function dropNodeData(env: { [key: string]: string }, useZkSupervisor?: boolean, chain?: string) { if (useZkSupervisor) { - await executeNodeCommand(env, 'zk_inception external-node init'); + let cmd = 'zk_inception external-node init'; + cmd += chain ? 
` --chain ${chain}` : ''; + await executeNodeCommand(env, cmd); } else { await executeNodeCommand(env, 'zk db reset'); await executeNodeCommand(env, 'zk clean --database'); @@ -100,7 +103,7 @@ async function executeNodeCommand(env: { [key: string]: string }, command: strin env }); try { - await waitForProcess(childProcess, true); + await waitForProcess(childProcess); } finally { childProcess.kill(); } @@ -110,11 +113,11 @@ export async function executeCommandWithLogs(command: string, logsPath: string) const logs = await fs.open(logsPath, 'w'); const childProcess = spawn(command, { cwd: process.env.ZKSYNC_HOME!!, - stdio: [null, logs.fd, logs.fd], + stdio: ['ignore', logs.fd, logs.fd], shell: true }); try { - await waitForProcess(childProcess, true); + await waitForProcess(childProcess); } finally { childProcess.kill(); await logs.close(); @@ -145,21 +148,46 @@ export class NodeProcess { } } + async stop(signal: 'INT' | 'KILL' = 'INT') { + interface ChildProcessError extends Error { + readonly code: number | null; + } + + let signalNumber; + if (signal == 'KILL') { + signalNumber = 9; + } else { + signalNumber = 15; + } + try { + await killPidWithAllChilds(this.childProcess.pid!, signalNumber); + } catch (err) { + const typedErr = err as ChildProcessError; + if (typedErr.code === 1) { + // No matching processes were found; this is fine. + } else { + throw err; + } + } + } + static async spawn( env: { [key: string]: string }, logsFile: FileHandle | string, pathToHome: string, - useZkInception: boolean, - components: NodeComponents = NodeComponents.STANDARD + components: NodeComponents = NodeComponents.STANDARD, + useZkInception?: boolean, + chain?: string ) { - const logs = typeof logsFile === 'string' ? await fs.open(logsFile, 'w') : logsFile; + const logs = typeof logsFile === 'string' ? 
await fs.open(logsFile, 'a') : logsFile; let childProcess = runExternalNodeInBackground({ components: [components], - stdio: [null, logs.fd, logs.fd], + stdio: ['ignore', logs.fd, logs.fd], cwd: pathToHome, env, - useZkInception + useZkInception, + chain }); return new NodeProcess(childProcess, logs); @@ -172,22 +200,26 @@ export class NodeProcess { } async stopAndWait(signal: 'INT' | 'KILL' = 'INT') { - await NodeProcess.stopAll(signal); - await waitForProcess(this.childProcess, signal === 'INT'); + let processWait = waitForProcess(this.childProcess); + await this.stop(signal); + await processWait; + console.log('stopped'); } } -async function waitForProcess(childProcess: ChildProcess, checkExitCode: boolean) { - await new Promise((resolve, reject) => { +function waitForProcess(childProcess: ChildProcess): Promise { + return new Promise((resolve, reject) => { + childProcess.on('close', (_code, _signal) => { + resolve(undefined); + }); childProcess.on('error', (error) => { reject(error); }); - childProcess.on('exit', (code) => { - if (!checkExitCode || code === 0) { - resolve(undefined); - } else { - reject(new Error(`Process exited with non-zero code: ${code}`)); - } + childProcess.on('exit', (_code) => { + resolve(undefined); + }); + childProcess.on('disconnect', () => { + resolve(undefined); }); }); } @@ -197,11 +229,16 @@ async function waitForProcess(childProcess: ChildProcess, checkExitCode: boolean */ export class FundedWallet { static async create(mainNode: zksync.Provider, eth: ethers.Provider): Promise { - const testConfigPath = path.join(process.env.ZKSYNC_HOME!, `etc/test_config/constant/eth.json`); - const ethTestConfig = JSON.parse(await fs.readFile(testConfigPath, { encoding: 'utf-8' })); - const mnemonic = ethers.Mnemonic.fromPhrase(ethTestConfig.test_mnemonic); - const walletHD = ethers.HDNodeWallet.fromMnemonic(mnemonic, "m/44'/60'/0'/0/0"); - const wallet = new zksync.Wallet(walletHD.privateKey, mainNode, eth); + if (!process.env.MASTER_WALLET_PK) { + const testConfigPath = path.join(process.env.ZKSYNC_HOME!, `etc/test_config/constant/eth.json`); + const ethTestConfig = JSON.parse(await fs.readFile(testConfigPath, { encoding: 'utf-8' })); + const mnemonic = ethers.Mnemonic.fromPhrase(ethTestConfig.test_mnemonic); + const walletHD = ethers.HDNodeWallet.fromMnemonic(mnemonic, "m/44'/60'/0'/0/0"); + + process.env.MASTER_WALLET_PK = walletHD.privateKey; + } + + const wallet = new zksync.Wallet(process.env.MASTER_WALLET_PK, mainNode, eth); return new FundedWallet(wallet); } diff --git a/core/tests/recovery-test/src/utils.ts b/core/tests/recovery-test/src/utils.ts index cfec302e94f4..98c6b6d4405c 100644 --- a/core/tests/recovery-test/src/utils.ts +++ b/core/tests/recovery-test/src/utils.ts @@ -48,17 +48,20 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception + useZkInception, + chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; useZkInception?: boolean; + chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; if (useZkInception) { command = 'zk_inception external-node run'; + command += chain ? 
` --chain ${chain}` : ''; } else { command = 'zk external-node --'; diff --git a/core/tests/recovery-test/tests/genesis-recovery.test.ts b/core/tests/recovery-test/tests/genesis-recovery.test.ts index 54b9699788f2..a43f5a9e92be 100644 --- a/core/tests/recovery-test/tests/genesis-recovery.test.ts +++ b/core/tests/recovery-test/tests/genesis-recovery.test.ts @@ -4,11 +4,18 @@ import { ethers } from 'ethers'; import { NodeProcess, dropNodeData, getExternalNodeHealth, NodeComponents, sleep, FundedWallet } from '../src'; import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs'; + import path from 'path'; const pathToHome = path.join(__dirname, '../../../..'); const fileConfig = shouldLoadConfigFromFile(); +import { logsTestPath } from 'utils/build/logs'; + +async function logsPath(name: string): Promise { + return await logsTestPath(fileConfig.chain, 'logs/recovery/genesis', name); +} + /** * Tests recovery of an external node from scratch. * @@ -34,6 +41,7 @@ describe('genesis recovery', () => { ZKSYNC_ENV: externalNodeEnvProfile, EN_SNAPSHOTS_RECOVERY_ENABLED: 'false' }; + const autoKill: boolean = !fileConfig.loadFromFile || !process.env.NO_KILL; let mainNode: zksync.Provider; let externalNode: zksync.Provider; @@ -52,11 +60,17 @@ describe('genesis recovery', () => { if (fileConfig.loadFromFile) { const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); + const externalNodeGeneralConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + configsFolderSuffix: 'external_node', + config: 'general.yaml' + }); ethRpcUrl = secretsConfig.l1.l1_rpc_url; apiWeb3JsonRpcHttpUrl = generalConfig.api.web3_json_rpc.http_url; - externalNodeUrl = 'http://127.0.0.1:3150'; - extNodeHealthUrl = 'http://127.0.0.1:3171/health'; + externalNodeUrl = externalNodeGeneralConfig.api.web3_json_rpc.http_url; + extNodeHealthUrl = `http://127.0.0.1:${externalNodeGeneralConfig.api.healthcheck.port}/health`; } else { ethRpcUrl = process.env.ETH_CLIENT_WEB3_URL ?? 
'http://127.0.0.1:8545'; apiWeb3JsonRpcHttpUrl = 'http://127.0.0.1:3050'; @@ -66,7 +80,9 @@ describe('genesis recovery', () => { mainNode = new zksync.Provider(apiWeb3JsonRpcHttpUrl); externalNode = new zksync.Provider(externalNodeUrl); - await NodeProcess.stopAll('KILL'); + if (autoKill) { + await NodeProcess.stopAll('KILL'); + } }); let fundedWallet: FundedWallet; @@ -96,16 +112,17 @@ describe('genesis recovery', () => { }); step('drop external node data', async () => { - await dropNodeData(fileConfig.loadFromFile, externalNodeEnv); + await dropNodeData(externalNodeEnv, fileConfig.loadFromFile, fileConfig.chain); }); step('initialize external node w/o a tree', async () => { externalNodeProcess = await NodeProcess.spawn( externalNodeEnv, - 'genesis-recovery.log', + await logsPath('external-node.log'), pathToHome, + NodeComponents.WITH_TREE_FETCHER_AND_NO_TREE, fileConfig.loadFromFile, - NodeComponents.WITH_TREE_FETCHER_AND_NO_TREE + fileConfig.chain ); const mainNodeBatchNumber = await mainNode.getL1BatchNumber(); @@ -186,8 +203,9 @@ describe('genesis recovery', () => { externalNodeEnv, externalNodeProcess.logs, pathToHome, + NodeComponents.WITH_TREE_FETCHER, fileConfig.loadFromFile, - NodeComponents.WITH_TREE_FETCHER + fileConfig.chain ); let isNodeReady = false; diff --git a/core/tests/recovery-test/tests/snapshot-recovery.test.ts b/core/tests/recovery-test/tests/snapshot-recovery.test.ts index bd508b0045c1..cadf146c5226 100644 --- a/core/tests/recovery-test/tests/snapshot-recovery.test.ts +++ b/core/tests/recovery-test/tests/snapshot-recovery.test.ts @@ -23,6 +23,7 @@ import { setTreeRecoveryParallelPersistenceBuffer } from './utils'; import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs'; +import { logsTestPath } from 'utils/build/logs'; const pathToHome = path.join(__dirname, '../../../..'); const fileConfig = shouldLoadConfigFromFile(); @@ -59,6 +60,10 @@ interface TokenInfo { readonly l2_address: string; } +async function logsPath(name: string): Promise { + return await logsTestPath(fileConfig.chain, 'logs/recovery/snapshot/', name); +} + /** * Tests snapshot recovery and node state pruning. 
* @@ -93,6 +98,8 @@ describe('snapshot recovery', () => { EN_EXPERIMENTAL_SNAPSHOTS_RECOVERY_TREE_PARALLEL_PERSISTENCE_BUFFER: '4' }; + const autoKill: boolean = !fileConfig.loadFromFile || !process.env.NO_KILL; + let snapshotMetadata: GetSnapshotResponse; let mainNode: zksync.Provider; let externalNode: zksync.Provider; @@ -112,11 +119,18 @@ describe('snapshot recovery', () => { if (fileConfig.loadFromFile) { const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); + const externalNodeGeneralConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + configsFolderSuffix: 'external_node', + config: 'general.yaml' + }); ethRpcUrl = secretsConfig.l1.l1_rpc_url; apiWeb3JsonRpcHttpUrl = generalConfig.api.web3_json_rpc.http_url; - externalNodeUrl = 'http://127.0.0.1:3150'; - extNodeHealthUrl = 'http://127.0.0.1:3171/health'; + + externalNodeUrl = externalNodeGeneralConfig.api.web3_json_rpc.http_url; + extNodeHealthUrl = `http://127.0.0.1:${externalNodeGeneralConfig.api.healthcheck.port}/health`; setSnapshotRecovery(pathToHome, fileConfig, true); setTreeRecoveryParallelPersistenceBuffer(pathToHome, fileConfig, 4); @@ -129,7 +143,9 @@ describe('snapshot recovery', () => { mainNode = new zksync.Provider(apiWeb3JsonRpcHttpUrl); externalNode = new zksync.Provider(externalNodeUrl); - await NodeProcess.stopAll('KILL'); + if (autoKill) { + await NodeProcess.stopAll('KILL'); + } }); before('create test wallet', async () => { @@ -169,10 +185,7 @@ describe('snapshot recovery', () => { } step('create snapshot', async () => { - await executeCommandWithLogs( - fileConfig.loadFromFile ? `zk_supervisor snapshot create` : 'zk run snapshots-creator', - 'snapshot-creator.log' - ); + await createSnapshot(fileConfig.loadFromFile); }); step('validate snapshot', async () => { @@ -226,15 +239,17 @@ describe('snapshot recovery', () => { }); step('drop external node data', async () => { - await dropNodeData(fileConfig.loadFromFile, externalNodeEnv); + await dropNodeData(externalNodeEnv, fileConfig.loadFromFile, fileConfig.chain); }); step('initialize external node', async () => { externalNodeProcess = await NodeProcess.spawn( externalNodeEnv, - 'snapshot-recovery.log', + await logsPath('external_node.log'), pathToHome, - fileConfig.loadFromFile + NodeComponents.STANDARD, + fileConfig.loadFromFile, + fileConfig.chain ); let recoveryFinished = false; @@ -356,8 +371,9 @@ describe('snapshot recovery', () => { externalNodeEnv, externalNodeProcess.logs, pathToHome, + components, fileConfig.loadFromFile, - components + fileConfig.chain ); let isDbPrunerReady = false; @@ -441,3 +457,14 @@ async function decompressGzip(filePath: string): Promise { readStream.pipe(gunzip); }); } + +async function createSnapshot(zkSupervisor: boolean) { + let command = ''; + if (zkSupervisor) { + command = `zk_supervisor snapshot create`; + command += ` --chain ${fileConfig.chain}`; + } else { + command = `zk run snapshots-creator`; + } + await executeCommandWithLogs(command, 'snapshot-creator.log'); +} diff --git a/core/tests/revert-test/tests/revert-and-restart-en.test.ts b/core/tests/revert-test/tests/revert-and-restart-en.test.ts index 952f8865f842..42fa01a02c90 100644 --- a/core/tests/revert-test/tests/revert-and-restart-en.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart-en.test.ts @@ -4,72 +4,32 @@ // main_contract.getTotalBatchesCommitted actually checks the number of batches 
committed. // main_contract.getTotalBatchesExecuted actually checks the number of batches executed. import * as utils from 'utils'; -import { Tester } from './tester'; -import { exec, runServerInBackground, runExternalNodeInBackground } from './utils'; +import { + checkRandomTransfer, + executeDepositAfterRevert, + executeRevert, + Node, + NodeSpawner, + NodeType, + waitToCommitBatchesWithoutExecution, + waitToExecuteBatch +} from './utils'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { expect, assert } from 'chai'; -import fs from 'fs'; +import { assert, expect } from 'chai'; +import fs from 'node:fs/promises'; import * as child_process from 'child_process'; import * as dotenv from 'dotenv'; -import { - getAllConfigsPath, - loadConfig, - shouldLoadConfigFromFile, - replaceAggregatedBlockExecuteDeadline -} from 'utils/build/file-configs'; +import { loadConfig, replaceAggregatedBlockExecuteDeadline, shouldLoadConfigFromFile } from 'utils/build/file-configs'; import path from 'path'; +import { logsTestPath } from 'utils/build/logs'; +import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; const pathToHome = path.join(__dirname, '../../../..'); const fileConfig = shouldLoadConfigFromFile(); -let mainEnv: string; -let extEnv: string; - -let deploymentMode: string; - -if (fileConfig.loadFromFile) { - const genesisConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'genesis.yaml' }); - deploymentMode = genesisConfig.deploymentMode; -} else { - if (!process.env.DEPLOYMENT_MODE) { - throw new Error('DEPLOYMENT_MODE is not set'); - } - if (!['Validium', 'Rollup'].includes(process.env.DEPLOYMENT_MODE)) { - throw new Error(`Unknown deployment mode: ${process.env.DEPLOYMENT_MODE}`); - } - deploymentMode = process.env.DEPLOYMENT_MODE; -} - -if (deploymentMode == 'Validium') { - mainEnv = process.env.IN_DOCKER ? 'dev_validium_docker' : 'dev_validium'; - extEnv = process.env.IN_DOCKER ? 'ext-node-validium-docker' : 'ext-node-validium'; -} else { - // Rollup deployment mode - mainEnv = process.env.IN_DOCKER ? 'docker' : 'dev'; - extEnv = process.env.IN_DOCKER ? 'ext-node-docker' : 'ext-node'; -} -const mainLogsPath: string = 'revert_main.log'; -const extLogsPath: string = 'revert_ext.log'; - -interface SuggestedValues { - lastExecutedL1BatchNumber: bigint; - nonce: number; - priorityFee: number; -} - -// Parses output of "print-suggested-values" command of the revert block tool. -function parseSuggestedValues(jsonString: string): SuggestedValues { - const json = JSON.parse(jsonString); - assert(json && typeof json === 'object'); - assert(Number.isInteger(json.last_executed_l1_batch_number)); - assert(Number.isInteger(json.nonce)); - assert(Number.isInteger(json.priority_fee)); - return { - lastExecutedL1BatchNumber: BigInt(json.last_executed_l1_batch_number), - nonce: json.nonce, - priorityFee: json.priority_fee - }; +async function logsPath(name: string): Promise { + return await logsTestPath(fileConfig.chain, 'logs/revert/en', name); } function run(cmd: string, args: string[], options: child_process.SpawnOptions): child_process.SpawnSyncReturns { @@ -89,7 +49,7 @@ function compileBinaries() { // Fetches env vars for the given environment (like 'dev', 'ext-node'). // TODO: it would be better to import zk tool code directly. 
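The new `logsPath` helper above routes suite output under a per-chain logs directory instead of dumping `revert_main.log`/`revert_ext.log` into the working directory. Usage sketch (the `logsTestPath(chain, dir, name)` signature is inferred from its call sites in this diff):

```typescript
import * as fs from 'node:fs/promises';
import { logsTestPath } from 'utils/build/logs';

// Assumed shape: logsTestPath resolves a per-chain path under the given suite dir.
async function openSuiteLog(chain: string | undefined, name: string) {
    const logPath = await logsTestPath(chain, 'logs/revert/en', name);
    // Append mode, so node restarts within a single test run keep earlier output.
    return await fs.open(logPath, 'a');
}
```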
-function fetchEnv(zksyncEnv: string): any { +function fetchEnv(zksyncEnv: string): Record { let res = run('./bin/zk', ['f', 'env'], { cwd: process.env.ZKSYNC_HOME, env: { @@ -101,441 +61,225 @@ function fetchEnv(zksyncEnv: string): any { return { ...process.env, ...dotenv.parse(res.stdout) }; } -async function runBlockReverter(args: string[]): Promise { - let env = fetchEnv(mainEnv); - - let fileConfigFlags = ''; +/** Loads env profiles for the main and external nodes */ +function loadEnvs() { + let deploymentMode: string; if (fileConfig.loadFromFile) { - const configPaths = getAllConfigsPath({ pathToHome, chain: fileConfig.chain }); - fileConfigFlags = ` - --config-path=${configPaths['general.yaml']} - --contracts-config-path=${configPaths['contracts.yaml']} - --secrets-path=${configPaths['secrets.yaml']} - --wallets-path=${configPaths['wallets.yaml']} - --genesis-path=${configPaths['genesis.yaml']} - `; - } - - const cmd = `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- ${args.join( - ' ' - )} ${fileConfigFlags}`; - const executedProcess = await exec(cmd, { - cwd: env.ZKSYNC_HOME, - env: { - ...env, - PATH: process.env.PATH - } - }); - - return executedProcess.stdout; -} - -async function killServerAndWaitForShutdown(tester: Tester, server: string) { - await utils.exec(`killall -9 ${server}`); - // Wait until it's really stopped. - let iter = 0; - while (iter < 30) { - try { - await tester.syncWallet.provider.getBlockNumber(); - await utils.sleep(2); - iter += 1; - } catch (_) { - // When exception happens, we assume that server died. - return; - } - } - // It's going to panic anyway, since the server is a singleton entity, so better to exit early. - throw new Error("Server didn't stop after a kill request"); -} - -class MainNode { - constructor(public tester: Tester) {} - - // Terminates all main node processes running. - public static async terminateAll() { - try { - await utils.exec('killall -INT zksync_server'); - } catch (err) { - console.log(`ignored error: ${err}`); - } - } - - // Spawns a main node. - // if enableConsensus is set, consensus component will be started in the main node. - // if enableExecute is NOT set, main node will NOT send L1 transactions to execute L1 batches. - public static async spawn( - logs: fs.WriteStream, - enableConsensus: boolean, - enableExecute: boolean, - ethClientWeb3Url: string, - apiWeb3JsonRpcHttpUrl: string, - baseTokenAddress: string - ): Promise { - let env = fetchEnv(mainEnv); - env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = enableExecute ? '1' : '10000'; - // Set full mode for the Merkle tree as it is required to get blocks committed. - env.DATABASE_MERKLE_TREE_MODE = 'full'; - - if (fileConfig.loadFromFile) { - replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, enableExecute ? 1 : 10000); - } - - let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads'; - if (enableConsensus) { - components += ',consensus'; - } - - let proc = runServerInBackground({ - components: [components], - stdio: [null, logs, logs], - cwd: pathToHome, - env: env, - useZkInception: fileConfig.loadFromFile - }); - - // Wait until the main node starts responding. 
- let tester: Tester = await Tester.init(ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress); - while (true) { - try { - await tester.syncWallet.provider.getBlockNumber(); - break; - } catch (err) { - if (proc.exitCode != null) { - assert.fail(`server failed to start, exitCode = ${proc.exitCode}`); - } - console.log('waiting for api endpoint'); - await utils.sleep(1); - } + const genesisConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'genesis.yaml' }); + deploymentMode = genesisConfig.deploymentMode; + } else { + deploymentMode = process.env.DEPLOYMENT_MODE ?? 'Rollup'; + if (!['Validium', 'Rollup'].includes(deploymentMode)) { + throw new Error(`Unknown deployment mode: ${deploymentMode}`); } - return new MainNode(tester); } -} - -class ExtNode { - constructor(public tester: Tester, private proc: child_process.ChildProcess) {} - - // Terminates all main node processes running. - public static async terminateAll() { - try { - await utils.exec('killall -INT zksync_external_node'); - } catch (err) { - console.log(`ignored error: ${err}`); - } + console.log(`Using deployment mode: ${deploymentMode}`); + + let mainEnvName: string; + let extEnvName: string; + if (deploymentMode === 'Validium') { + mainEnvName = process.env.IN_DOCKER ? 'dev_validium_docker' : 'dev_validium'; + extEnvName = process.env.IN_DOCKER ? 'ext-node-validium-docker' : 'ext-node-validium'; + } else { + // Rollup deployment mode + mainEnvName = process.env.IN_DOCKER ? 'docker' : 'dev'; + extEnvName = process.env.IN_DOCKER ? 'ext-node-docker' : 'ext-node'; } - // Spawns an external node. - // If enableConsensus is set, the node will use consensus P2P network to fetch blocks. - public static async spawn( - logs: fs.WriteStream, - enableConsensus: boolean, - ethClientWeb3Url: string, - enEthClientUrl: string, - baseTokenAddress: string - ): Promise { - let env = fetchEnv(extEnv); - let args = []; - if (enableConsensus) { - args.push('--enable-consensus'); - } - - // Run server in background. - let proc = runExternalNodeInBackground({ - stdio: [null, logs, logs], - cwd: pathToHome, - env: env, - useZkInception: fileConfig.loadFromFile - }); - - // Wait until the node starts responding. - let tester: Tester = await Tester.init(ethClientWeb3Url, enEthClientUrl, baseTokenAddress); - while (true) { - try { - await tester.syncWallet.provider.getBlockNumber(); - break; - } catch (err) { - if (proc.exitCode != null) { - assert.fail(`node failed to start, exitCode = ${proc.exitCode}`); - } - console.log('waiting for api endpoint'); - await utils.sleep(1); - } - } - return new ExtNode(tester, proc); - } - - // Waits for the node process to exit. 
- public async waitForExit(): Promise { - while (this.proc.exitCode === null) { - await utils.sleep(1); - } - return this.proc.exitCode; - } + console.log(`Fetching main node env: ${mainEnvName}`); + const mainEnv = fetchEnv(mainEnvName); + console.log(`Fetching EN env: ${extEnvName}`); + const extEnv = fetchEnv(extEnvName); + return [mainEnv, extEnv]; } describe('Block reverting test', function () { - let ethClientWeb3Url: string; - let apiWeb3JsonRpcHttpUrl: string; - let baseTokenAddress: string; - let enEthClientUrl: string; let operatorAddress: string; - let mainLogs: fs.WriteStream; - let extLogs: fs.WriteStream; let depositAmount: bigint; - let enableConsensus: boolean; + let mainNodeSpawner: NodeSpawner; + let mainEnv: Record; + let mainNode: Node; + let extNodeSpawner: NodeSpawner; + let extNode: Node; + let mainContract: IZkSyncHyperchain; + let alice: zksync.Wallet; + let depositL1BatchNumber: number; + let batchesCommittedBeforeRevert: bigint; + + const autoKill: boolean = !fileConfig.loadFromFile || !process.env.NO_KILL; before('initialize test', async () => { + let ethClientWeb3Url: string; + let apiWeb3JsonRpcHttpUrl: string; + let baseTokenAddress: string; + let enEthClientUrl: string; + + let extEnv; + [mainEnv, extEnv] = loadEnvs(); + if (fileConfig.loadFromFile) { const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); const contractsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'contracts.yaml' }); - const externalNodeConfig = loadConfig({ + const externalNodeGeneralConfig = loadConfig({ pathToHome, + configsFolderSuffix: 'external_node', chain: fileConfig.chain, - config: 'external_node.yaml' + config: 'general.yaml' }); const walletsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'wallets.yaml' }); ethClientWeb3Url = secretsConfig.l1.l1_rpc_url; apiWeb3JsonRpcHttpUrl = generalConfig.api.web3_json_rpc.http_url; baseTokenAddress = contractsConfig.l1.base_token_addr; - enEthClientUrl = externalNodeConfig.main_node_url; + enEthClientUrl = externalNodeGeneralConfig.api.web3_json_rpc.http_url; operatorAddress = walletsConfig.operator.address; } else { - let env = fetchEnv(mainEnv); - ethClientWeb3Url = env.ETH_CLIENT_WEB3_URL; - apiWeb3JsonRpcHttpUrl = env.API_WEB3_JSON_RPC_HTTP_URL; - baseTokenAddress = env.CONTRACTS_BASE_TOKEN_ADDR; - enEthClientUrl = `http://127.0.0.1:${env.EN_HTTP_PORT}`; + ethClientWeb3Url = mainEnv.ETH_CLIENT_WEB3_URL!; + apiWeb3JsonRpcHttpUrl = mainEnv.API_WEB3_JSON_RPC_HTTP_URL!; + baseTokenAddress = mainEnv.CONTRACTS_BASE_TOKEN_ADDR!; + enEthClientUrl = `http://127.0.0.1:${extEnv.EN_HTTP_PORT!}`; // TODO use env variable for this? 
operatorAddress = '0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7'; } + const pathToMainLogs = await logsPath('server.log'); + const mainLogs = await fs.open(pathToMainLogs, 'a'); + console.log(`Writing main node logs to ${pathToMainLogs}`); + + const pathToEnLogs = await logsPath('external_node.log'); + const extLogs = await fs.open(pathToEnLogs, 'a'); + console.log(`Writing EN logs to ${pathToEnLogs}`); + if (process.env.SKIP_COMPILATION !== 'true' && !fileConfig.loadFromFile) { compileBinaries(); } - console.log(`PWD = ${process.env.PWD}`); - mainLogs = fs.createWriteStream(mainLogsPath, { flags: 'a' }); - extLogs = fs.createWriteStream(extLogsPath, { flags: 'a' }); - enableConsensus = process.env.ENABLE_CONSENSUS === 'true'; + const enableConsensus = process.env.ENABLE_CONSENSUS === 'true'; + console.log(`enableConsensus = ${enableConsensus}`); depositAmount = ethers.parseEther('0.001'); - }); - - step('run', async () => { - console.log('Make sure that nodes are not running'); - await ExtNode.terminateAll(); - await MainNode.terminateAll(); - console.log('Start main node'); - let mainNode = await MainNode.spawn( - mainLogs, + const mainNodeSpawnOptions = { enableConsensus, - true, ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress - ); - console.log('Start ext node'); - let extNode = await ExtNode.spawn(extLogs, enableConsensus, ethClientWeb3Url, enEthClientUrl, baseTokenAddress); + }; + mainNodeSpawner = new NodeSpawner(pathToHome, mainLogs, fileConfig, mainNodeSpawnOptions, mainEnv); + const extNodeSpawnOptions = { + enableConsensus, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl: enEthClientUrl, + baseTokenAddress + }; + extNodeSpawner = new NodeSpawner(pathToHome, extLogs, fileConfig, extNodeSpawnOptions, extEnv); + }); + + step('Make sure that nodes are not running', async () => { + if (autoKill) { + await Node.killAll(NodeType.MAIN); + await Node.killAll(NodeType.EXT); + } + }); + + step('Start main node', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); + step('Start external node', async () => { + extNode = await extNodeSpawner.spawnExtNode(); + }); + + step('Fund wallets', async () => { await mainNode.tester.fundSyncWallet(); + mainContract = await mainNode.tester.syncWallet.getMainContract(); await extNode.tester.fundSyncWallet(); + alice = extNode.tester.emptyWallet(); + }); - const main_contract = await mainNode.tester.syncWallet.getMainContract(); - const baseToken = await mainNode.tester.syncWallet.getBaseToken(); - const isETHBasedChain = baseToken === zksync.utils.ETH_ADDRESS_IN_CONTRACTS; - const alice: zksync.Wallet = extNode.tester.emptyWallet(); + step('Seal L1 batch', async () => { + depositL1BatchNumber = await extNode.createBatchWithDeposit(alice.address, depositAmount); + }); - console.log( - 'Finalize an L1 transaction to ensure at least 1 executed L1 batch and that all transactions are processed' - ); - const h: zksync.types.PriorityOpResponse = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await h.waitFinalize(); - - console.log('Restart the main node with L1 batch execution disabled.'); - await killServerAndWaitForShutdown(mainNode.tester, 'zksync_server'); - mainNode = await MainNode.spawn( - mainLogs, - enableConsensus, - false, - ethClientWeb3Url, - apiWeb3JsonRpcHttpUrl, - baseTokenAddress - ); + step('wait for L1 batch to get executed', async () => { + await waitToExecuteBatch(mainContract, depositL1BatchNumber); + }); - console.log('Commit at least 2 L1 batches which are not executed'); - const lastExecuted = await main_contract.getTotalBatchesExecuted(); - // One is not enough to test the reversion of sk cache because - // it gets updated with some batch logs only at the start of the next batch. - const initialL1BatchNumber = await main_contract.getTotalBatchesCommitted(); - const firstDepositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - - await firstDepositHandle.wait(); - while ((await extNode.tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber) { - await utils.sleep(0.1); - } + step('Restart main node with batch execution turned off', async () => { + await mainNode.killAndWaitForShutdown(); + mainNode = await mainNodeSpawner.spawnMainNode(false); + }); - const secondDepositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await secondDepositHandle.wait(); - while ((await extNode.tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber + 1n) { - await utils.sleep(0.3); - } + // FIXME: need 2 batches? 
+ step('seal another L1 batch', async () => { + await extNode.createBatchWithDeposit(alice.address, depositAmount); + }); - const alice2 = await alice.getBalance(); - while (true) { - const lastCommitted = await main_contract.getTotalBatchesCommitted(); - console.log(`lastExecuted = ${lastExecuted}, lastCommitted = ${lastCommitted}`); - if (lastCommitted - lastExecuted >= 2n) { - console.log('Terminate the main node'); - await killServerAndWaitForShutdown(mainNode.tester, 'zksync_server'); - break; - } - await utils.sleep(0.3); - } + step('check wallet balance', async () => { + const balance = await alice.getBalance(); + console.log(`Balance before revert: ${balance}`); + assert(balance === depositAmount * 2n, 'Incorrect balance after deposits'); + }); - console.log('Ask block_reverter to suggest to which L1 batch we should revert'); - const values_json = await runBlockReverter([ - 'print-suggested-values', - '--json', - '--operator-address', - operatorAddress - ]); - console.log(`values = ${values_json}`); - const values = parseSuggestedValues(values_json); - assert(lastExecuted === values.lastExecutedL1BatchNumber); - - console.log('Send reverting transaction to L1'); - await runBlockReverter([ - 'send-eth-transaction', - '--l1-batch-number', - values.lastExecutedL1BatchNumber.toString(), - '--nonce', - values.nonce.toString(), - '--priority-fee-per-gas', - values.priorityFee.toString() - ]); - - console.log('Check that batches are reverted on L1'); - const lastCommitted2 = await main_contract.getTotalBatchesCommitted(); - console.log(`lastCommitted = ${lastCommitted2}, want ${lastExecuted}`); - assert(lastCommitted2 === lastExecuted); - - console.log('Rollback db'); - await runBlockReverter([ - 'rollback-db', - '--l1-batch-number', - values.lastExecutedL1BatchNumber.toString(), - '--rollback-postgres', - '--rollback-tree', - '--rollback-sk-cache', - '--rollback-vm-runners-cache' - ]); - - console.log('Start main node.'); - mainNode = await MainNode.spawn( - mainLogs, - enableConsensus, - true, - ethClientWeb3Url, - apiWeb3JsonRpcHttpUrl, - baseTokenAddress + step('wait for the new batch to be committed', async () => { + batchesCommittedBeforeRevert = await waitToCommitBatchesWithoutExecution(mainContract); + }); + + step('stop server', async () => { + await mainNode.killAndWaitForShutdown(); + }); + + step('revert batches', async () => { + await executeRevert( + pathToHome, + fileConfig.chain, + operatorAddress, + batchesCommittedBeforeRevert, + mainContract, + mainEnv ); + }); + + step('restart server', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); - console.log('Wait for the external node to detect reorg and terminate'); + step('Wait for EN to detect reorg and terminate', async () => { await extNode.waitForExit(); + }); - console.log('Restart external node and wait for it to revert.'); - extNode = await ExtNode.spawn(extLogs, enableConsensus, ethClientWeb3Url, enEthClientUrl, baseTokenAddress); - - console.log('Execute an L1 transaction'); - const depositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - - let l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); - while (!l1TxResponse) { - console.log(`Deposit ${depositHandle.hash} is not visible to the L1 network; sleeping`); - await utils.sleep(1); - l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); - } + step('Restart EN', async () => { + extNode = await extNodeSpawner.spawnExtNode(); + }); - // TODO: it would be nice to know WHY it "doesn't work well with block reversions" and what it actually means. - console.log( - "ethers doesn't work well with block reversions, so wait for the receipt before calling `.waitFinalize()`." - ); - const l2Tx = await alice._providerL2().getL2TransactionFromPriorityOp(l1TxResponse); - let receipt = null; - while (true) { - receipt = await extNode.tester.syncWallet.provider.getTransactionReceipt(l2Tx.hash); - if (receipt != null) { - break; - } + step('wait until last deposit is re-executed', async () => { + let balanceBefore; + let tryCount = 0; + while ((balanceBefore = await alice.getBalance()) !== 2n * depositAmount && tryCount < 30) { + console.log(`Balance after revert: ${balanceBefore}`); + tryCount++; await utils.sleep(1); } - await depositHandle.waitFinalize(); - expect(receipt.status).to.be.eql(1); - - // The reverted transactions are expected to be reexecuted before the next transaction is applied. - // Hence we compare the state against the alice2, rather than against alice3. - const alice4want = alice2 + depositAmount; - const alice4 = await alice.getBalance(); - console.log(`Alice's balance is ${alice4}, want ${alice4want}`); - assert(alice4 === alice4want); - - console.log('Execute an L2 transaction'); - await checkedRandomTransfer(alice, 1n); + assert(balanceBefore === 2n * depositAmount, 'Incorrect balance after revert'); + }); + + step('execute transaction after revert', async () => { + await executeDepositAfterRevert(extNode.tester, alice, depositAmount); + const balanceAfter = await alice.getBalance(); + console.log(`Balance after another deposit: ${balanceAfter}`); + assert(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit'); + }); + + step('check random transfer', async () => { + await checkRandomTransfer(alice, 1n); }); after('terminate nodes', async () => { - await MainNode.terminateAll(); - await ExtNode.terminateAll(); + await mainNode.terminate(); + await extNode.terminate(); if (fileConfig.loadFromFile) { replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, 10); } }); }); - -// Transfers amount from sender to a random wallet in an L2 transaction. -async function checkedRandomTransfer(sender: zksync.Wallet, amount: bigint) { - const senderBalanceBefore = await sender.getBalance(); - const receiver = zksync.Wallet.createRandom().connect(sender.provider); - const transferHandle = await sender.sendTransaction({ to: receiver.address, value: amount, type: 0 }); - - // ethers doesn't work well with block reversions, so we poll for the receipt manually. 
- let txReceipt = null; - do { - txReceipt = await sender.provider.getTransactionReceipt(transferHandle.hash); - await utils.sleep(1); - } while (txReceipt === null); - - const senderBalance = await sender.getBalance(); - const receiverBalance = await receiver.provider!.getBalance(receiver.address); - - expect(receiverBalance === amount, 'Failed updated the balance of the receiver').to.be.true; - - const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! + amount; - expect(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender').to.be.true; -} diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index bea912d3305e..163a7294b5f6 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -1,94 +1,52 @@ import * as utils from 'utils'; -import { loadConfig, shouldLoadConfigFromFile, getAllConfigsPath } from 'utils/build/file-configs'; -import { runServerInBackground } from 'utils/build/server'; -import { Tester } from './tester'; +import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs'; +import { + checkRandomTransfer, + executeDepositAfterRevert, + executeRevert, + Node, + NodeSpawner, + NodeType, + waitToCommitBatchesWithoutExecution, + waitToExecuteBatch +} from './utils'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { expect } from 'chai'; -import fs from 'fs'; +import { assert } from 'chai'; import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; import path from 'path'; - -// Parses output of "print-suggested-values" command of the revert block tool. -function parseSuggestedValues(suggestedValuesString: string): { - lastL1BatchNumber: bigint; - nonce: bigint; - priorityFee: bigint; -} { - const json = JSON.parse(suggestedValuesString); - if (!json || typeof json !== 'object') { - throw new TypeError('suggested values are not an object'); - } - - const lastL1BatchNumber = json.last_executed_l1_batch_number; - if (!Number.isInteger(lastL1BatchNumber)) { - throw new TypeError('suggested `lastL1BatchNumber` is not an integer'); - } - const nonce = json.nonce; - if (!Number.isInteger(nonce)) { - throw new TypeError('suggested `nonce` is not an integer'); - } - const priorityFee = json.priority_fee; - if (!Number.isInteger(priorityFee)) { - throw new TypeError('suggested `priorityFee` is not an integer'); - } - - return { - lastL1BatchNumber: BigInt(lastL1BatchNumber), - nonce: BigInt(nonce), - priorityFee: BigInt(priorityFee) - }; -} - -async function killServerAndWaitForShutdown(tester: Tester) { - await utils.exec('killall -9 zksync_server'); - // Wait until it's really stopped. - let iter = 0; - while (iter < 30) { - try { - await tester.syncWallet.provider.getBlockNumber(); - await utils.sleep(2); - iter += 1; - } catch (_) { - // When exception happens, we assume that server died. - return; - } - } - // It's going to panic anyway, since the server is a singleton entity, so better to exit early. - throw new Error("Server didn't stop after a kill request"); -} +import fs from 'node:fs/promises'; +import { logsTestPath } from 'utils/build/logs'; function ignoreError(_err: any, context?: string) { const message = context ? 
`Error ignored (context: ${context}).` : 'Error ignored.'; console.info(message); } -const depositAmount = ethers.parseEther('0.001'); - describe('Block reverting test', function () { - let tester: Tester; let alice: zksync.Wallet; let mainContract: IZkSyncHyperchain; - let blocksCommittedBeforeRevert: bigint; - let logs: fs.WriteStream; + let depositL1BatchNumber: number; + let batchesCommittedBeforeRevert: bigint; + let mainLogs: fs.FileHandle; let operatorAddress: string; + let baseTokenAddress: string; let ethClientWeb3Url: string; let apiWeb3JsonRpcHttpUrl: string; + let mainNodeSpawner: NodeSpawner; + let mainNode: Node; const fileConfig = shouldLoadConfigFromFile(); - const pathToHome = path.join(__dirname, '../../../..'); - + const autoKill: boolean = !fileConfig.loadFromFile || !process.env.NO_KILL; const enableConsensus = process.env.ENABLE_CONSENSUS == 'true'; - let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads'; - if (enableConsensus) { - components += ',consensus'; + const depositAmount = ethers.parseEther('0.001'); + + async function logsPath(name: string): Promise { + return await logsTestPath(fileConfig.chain, 'logs/revert/', name); } before('initialize test', async () => { - // Clone file configs if necessary - let baseTokenAddress: string; - if (!fileConfig.loadFromFile) { operatorAddress = process.env.ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR!; ethClientWeb3Url = process.env.ETH_CLIENT_WEB3_URL!; @@ -122,219 +80,112 @@ describe('Block reverting test', function () { baseTokenAddress = contractsConfig.l1.base_token_addr; } - // Create test wallets - tester = await Tester.init(ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress); - alice = tester.emptyWallet(); - logs = fs.createWriteStream('revert.log', { flags: 'a' }); - }); - - step('run server and execute some transactions', async () => { - // Make sure server isn't running. - await killServerAndWaitForShutdown(tester).catch(ignoreError); + const pathToMainLogs = await logsPath('server.log'); + mainLogs = await fs.open(pathToMainLogs, 'a'); + console.log(`Writing server logs to ${pathToMainLogs}`); - // Run server in background. - runServerInBackground({ - components: [components], - stdio: [null, logs, logs], - cwd: pathToHome, - useZkInception: fileConfig.loadFromFile + mainNodeSpawner = new NodeSpawner(pathToHome, mainLogs, fileConfig, { + enableConsensus, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl, + baseTokenAddress }); + }); - // Server may need some time to recompile if it's a cold run, so wait for it. - let iter = 0; - while (iter < 30 && !mainContract) { - try { - mainContract = await tester.syncWallet.getMainContract(); - } catch (err) { - ignoreError(err, 'waiting for server HTTP JSON-RPC to start'); - await utils.sleep(2); - iter += 1; - } - } - if (!mainContract) { - throw new Error('Server did not start'); - } - - await tester.fundSyncWallet(); - - // Seal 2 L1 batches. - // One is not enough to test the reversion of sk cache because - // it gets updated with some batch logs only at the start of the next batch. - const initialL1BatchNumber = await tester.web3Provider.getL1BatchNumber(); - const firstDepositHandle = await tester.syncWallet.deposit({ - token: tester.isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await firstDepositHandle.wait(); - while ((await tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber) { - await utils.sleep(1); - } - const secondDepositHandle = await tester.syncWallet.deposit({ - token: tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await secondDepositHandle.wait(); - while ((await tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber + 1) { - await utils.sleep(1); + step('Make sure that the server is not running', async () => { + if (autoKill) { + // Make sure server isn't running. + await Node.killAll(NodeType.MAIN); } + }); - const balance = await alice.getBalance(); - expect(balance === depositAmount * 2n, 'Incorrect balance after deposits').to.be.true; - - // Check L1 committed and executed blocks. - let blocksCommitted = await mainContract.getTotalBatchesCommitted(); - let blocksExecuted = await mainContract.getTotalBatchesExecuted(); - let tryCount = 0; - while (blocksCommitted === blocksExecuted && tryCount < 100) { - blocksCommitted = await mainContract.getTotalBatchesCommitted(); - blocksExecuted = await mainContract.getTotalBatchesExecuted(); - tryCount += 1; - await utils.sleep(1); - } - expect(blocksCommitted > blocksExecuted, 'There is no committed but not executed block').to.be.true; - blocksCommittedBeforeRevert = blocksCommitted; + step('start server', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); - // Stop server. - await killServerAndWaitForShutdown(tester); + step('fund wallet', async () => { + await mainNode.tester.fundSyncWallet(); + mainContract = await mainNode.tester.syncWallet.getMainContract(); + alice = mainNode.tester.emptyWallet(); }); - step('revert blocks', async () => { - let fileConfigFlags = ''; - if (fileConfig.loadFromFile) { - const configPaths = getAllConfigsPath({ pathToHome, chain: fileConfig.chain }); - fileConfigFlags = ` - --config-path=${configPaths['general.yaml']} - --contracts-config-path=${configPaths['contracts.yaml']} - --secrets-path=${configPaths['secrets.yaml']} - --wallets-path=${configPaths['wallets.yaml']} - --genesis-path=${configPaths['genesis.yaml']} - `; - } + // Seal 2 L1 batches. + // One is not enough to test the reversion of sk cache because + // it gets updated with some batch logs only at the start of the next batch. 
+ step('seal L1 batch', async () => { + depositL1BatchNumber = await mainNode.createBatchWithDeposit(alice.address, depositAmount); + }); - const executedProcess = await utils.exec( - `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- print-suggested-values --json --operator-address ${operatorAddress} ${fileConfigFlags}` - // ^ Switch off logs to not pollute the output JSON - ); - const suggestedValuesOutput = executedProcess.stdout; - const { lastL1BatchNumber, nonce, priorityFee } = parseSuggestedValues(suggestedValuesOutput); - expect(lastL1BatchNumber < blocksCommittedBeforeRevert, 'There should be at least one block for revert').to.be - .true; + step('wait for an L1 batch to get executed', async () => { + await waitToExecuteBatch(mainContract, depositL1BatchNumber); + }); - console.log( - `Reverting with parameters: last unreverted L1 batch number: ${lastL1BatchNumber}, nonce: ${nonce}, priorityFee: ${priorityFee}` - ); + step('restart server with batch execution turned off', async () => { + await mainNode.killAndWaitForShutdown(); + mainNode = await mainNodeSpawner.spawnMainNode(false); + }); - console.log('Sending ETH transaction..'); - await utils.spawn( - `cd ${pathToHome} && cargo run --bin block_reverter --release -- send-eth-transaction --l1-batch-number ${lastL1BatchNumber} --nonce ${nonce} --priority-fee-per-gas ${priorityFee} ${fileConfigFlags}` - ); + step('seal another L1 batch', async () => { + await mainNode.createBatchWithDeposit(alice.address, depositAmount); + }); - console.log('Rolling back DB..'); - await utils.spawn( - `cd ${pathToHome} && cargo run --bin block_reverter --release -- rollback-db --l1-batch-number ${lastL1BatchNumber} --rollback-postgres --rollback-tree --rollback-sk-cache --rollback-vm-runners-cache ${fileConfigFlags}` - ); + step('check wallet balance', async () => { + const balance = await alice.getBalance(); + console.log(`Balance before revert: ${balance}`); + assert(balance === depositAmount * 2n, 'Incorrect balance after deposits'); + }); - let blocksCommitted = await mainContract.getTotalBatchesCommitted(); - expect(blocksCommitted === lastL1BatchNumber, 'Revert on contract was unsuccessful').to.be.true; + step('wait for the new batch to be committed', async () => { + batchesCommittedBeforeRevert = await waitToCommitBatchesWithoutExecution(mainContract); }); - step('execute transaction after revert', async () => { - // Run server. - runServerInBackground({ - components: [components], - stdio: [null, logs, logs], - cwd: pathToHome, - useZkInception: fileConfig.loadFromFile - }); - await utils.sleep(30); + step('stop server', async () => { + await mainNode.killAndWaitForShutdown(); + }); - const balanceBefore = await alice.getBalance(); - expect(balanceBefore === depositAmount * 2n, 'Incorrect balance after revert').to.be.true; + step('revert batches', async () => { + await executeRevert(pathToHome, fileConfig.chain, operatorAddress, batchesCommittedBeforeRevert, mainContract); + }); - // Execute a transaction - const depositHandle = await tester.syncWallet.deposit({ - token: tester.isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); + step('restart server', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); - let l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); - while (!l1TxResponse) { - console.log(`Deposit ${depositHandle.hash} is not visible to the L1 network; sleeping`); + step('wait until last deposit is re-executed', async () => { + let balanceBefore; + let tryCount = 0; + while ((balanceBefore = await alice.getBalance()) !== 2n * depositAmount && tryCount < 30) { + console.log(`Balance after revert: ${balanceBefore}`); + tryCount++; await utils.sleep(1); - l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); } + assert(balanceBefore === 2n * depositAmount, 'Incorrect balance after revert'); + }); - // ethers doesn't work well with block reversions, so wait for the receipt before calling `.waitFinalize()`. - const l2Tx = await alice._providerL2().getL2TransactionFromPriorityOp(l1TxResponse); - let receipt = null; - do { - receipt = await tester.syncWallet.provider.getTransactionReceipt(l2Tx.hash); - await utils.sleep(1); - } while (receipt == null); - - await depositHandle.waitFinalize(); - expect(receipt.status).to.be.eql(1); - + step('execute transaction after revert', async () => { + await executeDepositAfterRevert(mainNode.tester, alice, depositAmount); const balanceAfter = await alice.getBalance(); - expect(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit').to.be.true; + console.log(`Balance after another deposit: ${balanceAfter}`); + assert(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit'); }); step('execute transactions after simple restart', async () => { // Execute an L2 transaction - await checkedRandomTransfer(alice, 1n); + await checkRandomTransfer(alice, 1n); // Stop server. - await killServerAndWaitForShutdown(tester); + await mainNode.killAndWaitForShutdown(); // Run again. - runServerInBackground({ - components: [components], - stdio: [null, logs, logs], - cwd: pathToHome, - useZkInception: fileConfig.loadFromFile - }); - await utils.sleep(30); + mainNode = await mainNodeSpawner.spawnMainNode(true); // Trying to send a transaction from the same address again - await checkedRandomTransfer(alice, 1n); + await checkRandomTransfer(alice, 1n); }); after('Try killing server', async () => { - await utils.exec('killall zksync_server').catch(ignoreError); + if (autoKill) { + await utils.exec('killall zksync_server').catch(ignoreError); + } }); }); - -async function checkedRandomTransfer(sender: zksync.Wallet, amount: bigint) { - const senderBalanceBefore = await sender.getBalance(); - const receiverHD = zksync.Wallet.createRandom(); - const receiver = new zksync.Wallet(receiverHD.privateKey, sender.provider); - const transferHandle = await sender.sendTransaction({ - to: receiver.address, - value: amount, - type: 0 - }); - - // ethers doesn't work well with block reversions, so we poll for the receipt manually. 
- let txReceipt = null; - do { - txReceipt = await sender.provider.getTransactionReceipt(transferHandle.hash); - await utils.sleep(1); - } while (txReceipt == null); - - const senderBalance = await sender.getBalance(); - const receiverBalance = await receiver.getBalance(); - - expect(receiverBalance === amount, 'Failed updated the balance of the receiver').to.be.true; - - const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! + amount; - expect(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender').to.be.true; -} diff --git a/core/tests/revert-test/tests/tester.ts b/core/tests/revert-test/tests/tester.ts index faf7f0949232..1809b4c2784c 100644 --- a/core/tests/revert-test/tests/tester.ts +++ b/core/tests/revert-test/tests/tester.ts @@ -8,12 +8,12 @@ const BASE_ERC20_TO_MINT = ethers.parseEther('100'); export class Tester { public runningFee: Map; + constructor( public ethProvider: ethers.Provider, public ethWallet: ethers.Wallet, public syncWallet: zksync.Wallet, public web3Provider: zksync.Provider, - public hyperchainAdmin: ethers.Wallet, // We need to add validator to ValidatorTimelock with admin rights public isETHBasedChain: boolean, public baseTokenAddress: string ) { @@ -21,22 +21,27 @@ export class Tester { } // prettier-ignore - static async init(l1_rpc_addr: string, l2_rpc_addr: string, baseTokenAddress: string) : Promise { + static async init(l1_rpc_addr: string, l2_rpc_addr: string, baseTokenAddress: string): Promise { const ethProvider = new ethers.JsonRpcProvider(l1_rpc_addr); ethProvider.pollingInterval = 100; const testConfigPath = path.join(process.env.ZKSYNC_HOME!, `etc/test_config/constant`); - const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); - const ethWalletHD = ethers.HDNodeWallet.fromMnemonic( - ethers.Mnemonic.fromPhrase(ethTestConfig.test_mnemonic), - "m/44'/60'/0'/0/0" - ); - const ethWallet = new ethers.Wallet(ethWalletHD.privateKey, ethProvider); - const hyperchainAdminHD = ethers.HDNodeWallet.fromMnemonic( - ethers.Mnemonic.fromPhrase(ethTestConfig.mnemonic), - "m/44'/60'/0'/0/1" - ); - const hyperchainAdmin = new ethers.Wallet(hyperchainAdminHD.privateKey, ethProvider); + const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, {encoding: 'utf-8'})); + + let ethWalletPK: string; + if (process.env.MASTER_WALLET_PK) { + ethWalletPK = process.env.MASTER_WALLET_PK; + } else { + const ethWalletHD = ethers.HDNodeWallet.fromMnemonic( + ethers.Mnemonic.fromPhrase(ethTestConfig.test_mnemonic), + "m/44'/60'/0'/0/0" + ); + + ethWalletPK = ethWalletHD.privateKey + } + + const ethWallet = new ethers.Wallet(ethWalletPK, ethProvider); + const web3Provider = new zksync.Provider(l2_rpc_addr); web3Provider.pollingInterval = 100; // It's OK to keep it low even on stage. const syncWallet = new zksync.Wallet(ethWallet.privateKey, web3Provider, ethProvider); @@ -54,7 +59,12 @@ export class Tester { // anyways. We will also set the miner's tip to 5 gwei, which is also much higher than the normal one. 
const maxFeePerGas = ethers.parseEther("0.00000025"); // 250 gwei const maxPriorityFeePerGas = ethers.parseEther("0.000000005"); // 5 gwei - cancellationTxs.push(ethWallet.sendTransaction({ to: ethWallet.address, nonce, maxFeePerGas, maxPriorityFeePerGas }).then((tx) => tx.wait())); + cancellationTxs.push(ethWallet.sendTransaction({ + to: ethWallet.address, + nonce, + maxFeePerGas, + maxPriorityFeePerGas + }).then((tx) => tx.wait())); } if (cancellationTxs.length > 0) { await Promise.all(cancellationTxs); @@ -63,7 +73,7 @@ export class Tester { const isETHBasedChain = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS; - return new Tester(ethProvider, ethWallet, syncWallet, web3Provider, hyperchainAdmin, isETHBasedChain, baseTokenAddress); + return new Tester(ethProvider, ethWallet, syncWallet, web3Provider, isETHBasedChain, baseTokenAddress); } /// Ensures that the main wallet has enough base token. diff --git a/core/tests/revert-test/tests/utils.ts b/core/tests/revert-test/tests/utils.ts index 4bf38387cccf..ea8a45b97c37 100644 --- a/core/tests/revert-test/tests/utils.ts +++ b/core/tests/revert-test/tests/utils.ts @@ -1,5 +1,13 @@ import { exec as _exec, spawn as _spawn, ChildProcessWithoutNullStreams, type ProcessEnvOptions } from 'child_process'; import { promisify } from 'util'; +import { assert, expect } from 'chai'; +import { FileConfig, getAllConfigsPath, replaceAggregatedBlockExecuteDeadline } from 'utils/build/file-configs'; +import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; +import { Tester } from './tester'; +import { killPidWithAllChilds } from 'utils/build/kill'; +import * as utils from 'utils'; +import fs from 'node:fs/promises'; +import * as zksync from 'zksync-ethers'; // executes a command in background and returns a child process handle // by default pipes data to parent's stdio but this can be overridden @@ -15,6 +23,7 @@ export function background({ env?: ProcessEnvOptions['env']; }): ChildProcessWithoutNullStreams { command = command.replace(/\n/g, ' '); + console.log(`Run command ${command}`); return _spawn(command, { stdio: stdio, shell: true, detached: true, cwd, env }); } @@ -42,15 +51,25 @@ export function runServerInBackground({ stdio, cwd, env, - useZkInception + useZkInception, + chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; useZkInception?: boolean; + chain?: string; }): ChildProcessWithoutNullStreams { - let command = useZkInception ? 'zk_inception server' : 'zk server'; + let command = ''; + if (useZkInception) { + command = 'zk_inception server'; + if (chain) { + command += ` --chain ${chain}`; + } + } else { + command = 'zk server'; + } return runInBackground({ command, components, stdio, cwd, env }); } @@ -59,15 +78,24 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception + useZkInception, + chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; useZkInception?: boolean; + chain?: string; }): ChildProcessWithoutNullStreams { - let command = useZkInception ? 'zk_inception external-node run' : 'zk external-node'; + let command = ''; + if (useZkInception) { + command = 'zk_inception external-node run'; + command += chain ? 
` --chain ${chain}` : '';
+    } else {
+        command = 'zk external-node';
+    }
+
     return runInBackground({ command, components, stdio, cwd, env });
 }
 
@@ -75,7 +103,394 @@ export function runExternalNodeInBackground({
 // spawns a new shell and can execute arbitrary commands, like "ls -la | grep .env"
 // returns { stdout, stderr }
 const promisified = promisify(_exec);
+
 export function exec(command: string, options: ProcessEnvOptions) {
     command = command.replace(/\n/g, ' ');
     return promisified(command, options);
 }
+
+export interface SuggestedValues {
+    lastExecutedL1BatchNumber: bigint;
+    nonce: number;
+    priorityFee: number;
+}
+
+/** Parses output of "print-suggested-values" command of the revert block tool. */
+export function parseSuggestedValues(jsonString: string): SuggestedValues {
+    const json = JSON.parse(jsonString);
+    assert(json && typeof json === 'object');
+    assert(Number.isInteger(json.last_executed_l1_batch_number));
+    assert(Number.isInteger(json.nonce));
+    assert(Number.isInteger(json.priority_fee));
+    return {
+        lastExecutedL1BatchNumber: BigInt(json.last_executed_l1_batch_number),
+        nonce: json.nonce,
+        priorityFee: json.priority_fee
+    };
+}
+
+async function runBlockReverter(
+    pathToHome: string,
+    chain: string | undefined,
+    env: ProcessEnvOptions['env'] | undefined,
+    args: string[]
+): Promise<string> {
+    let fileConfigFlags = '';
+    if (chain) {
+        const configPaths = getAllConfigsPath({ pathToHome, chain });
+        fileConfigFlags = `
+            --config-path=${configPaths['general.yaml']}
+            --contracts-config-path=${configPaths['contracts.yaml']}
+            --secrets-path=${configPaths['secrets.yaml']}
+            --wallets-path=${configPaths['wallets.yaml']}
+            --genesis-path=${configPaths['genesis.yaml']}
+        `;
+    }
+
+    const cmd = `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- ${args.join(
+        ' '
+    )} ${fileConfigFlags}`;
+
+    const options = env
+        ? {
+              cwd: env.ZKSYNC_HOME,
+              env: {
+                  ...env,
+                  PATH: process.env.PATH
+              }
+          }
+        : {};
+    const executedProcess = await exec(cmd, options);
+    return executedProcess.stdout;
+}
+
+export async function executeRevert(
+    pathToHome: string,
+    chain: string | undefined,
+    operatorAddress: string,
+    batchesCommittedBeforeRevert: bigint,
+    mainContract: IZkSyncHyperchain,
+    env?: ProcessEnvOptions['env']
+) {
+    const suggestedValuesOutput = await runBlockReverter(pathToHome, chain, env, [
+        'print-suggested-values',
+        '--json',
+        '--operator-address',
+        operatorAddress
+    ]);
+    const values = parseSuggestedValues(suggestedValuesOutput);
+    assert(
+        values.lastExecutedL1BatchNumber < batchesCommittedBeforeRevert,
+        'There should be at least one block for revert'
+    );
+
+    console.log('Reverting with parameters', values);
+
+    console.log('Sending ETH transaction..');
+    await runBlockReverter(pathToHome, chain, env, [
+        'send-eth-transaction',
+        '--l1-batch-number',
+        values.lastExecutedL1BatchNumber.toString(),
+        '--nonce',
+        values.nonce.toString(),
+        '--priority-fee-per-gas',
+        values.priorityFee.toString()
+    ]);
+
+    console.log('Rolling back DB..');
+    await runBlockReverter(pathToHome, chain, env, [
+        'rollback-db',
+        '--l1-batch-number',
+        values.lastExecutedL1BatchNumber.toString(),
+        '--rollback-postgres',
+        '--rollback-tree',
+        '--rollback-sk-cache',
+        '--rollback-vm-runners-cache'
+    ]);
+
+    const blocksCommitted = await mainContract.getTotalBatchesCommitted();
+    assert(blocksCommitted === values.lastExecutedL1BatchNumber, 'Revert on contract was unsuccessful');
+}
+
+export interface MainNodeSpawnOptions {
+    enableConsensus: boolean;
+    ethClientWeb3Url: string;
+    apiWeb3JsonRpcHttpUrl: string;
+    baseTokenAddress: string;
+}
+
+export enum NodeType {
+    MAIN = 'zksync_server',
+    EXT = 'zksync_external_node'
+}
+
+export class Node<TYPE extends NodeType> {
+    constructor(
+        public readonly tester: Tester,
+        private readonly proc: ChildProcessWithoutNullStreams,
+        private readonly type: TYPE
+    ) {}
+
+    public async terminate() {
+        try {
+            await killPidWithAllChilds(this.proc.pid!, 9);
+        } catch (err) {
+            console.log(`ignored error: ${err}`);
+        }
+    }
+
+    /**
+     * Terminates all main node processes running.
+     *
+     * WARNING: This is not safe to use when running nodes on multiple chains.
+     */
+    public static async killAll(type: NodeType) {
+        try {
+            await utils.exec(`killall -KILL ${type}`);
+        } catch (err) {
+            console.log(`ignored error: ${err}`);
+        }
+    }
+
+    /** Waits for the node process to exit. */
+    public async waitForExit(): Promise<number> {
+        while (this.proc.exitCode === null) {
+            await utils.sleep(1);
+        }
+        return this.proc.exitCode;
+    }
+
+    public async killAndWaitForShutdown() {
+        await this.terminate();
+        // Wait until it's really stopped.
+        let iter = 0;
+        while (iter < 30) {
+            try {
+                await this.tester.syncWallet.provider.getBlockNumber();
+                await utils.sleep(2);
+                iter += 1;
+            } catch (_) {
+                // When exception happens, we assume that server died.
+                return;
+            }
+        }
+        // It's going to panic anyway, since the server is a singleton entity, so better to exit early.
+        throw new Error(`${this.type} didn't stop after a kill request`);
+    }
+
+    public async createBatchWithDeposit(to: string, amount: bigint) {
+        const initialL1BatchNumber = await this.tester.web3Provider.getL1BatchNumber();
+        console.log(`Initial L1 batch: ${initialL1BatchNumber}`);
+
+        const depositHandle = await this.tester.syncWallet.deposit({
+            token: this.tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : this.tester.baseTokenAddress,
+            amount,
+            to,
+            approveBaseERC20: true,
+            approveERC20: true
+        });
+
+        let depositBatchNumber;
+        while (!(depositBatchNumber = (await depositHandle.wait()).l1BatchNumber)) {
+            console.log('Deposit is not included in L1 batch; sleeping');
+            await utils.sleep(1);
+        }
+        console.log(`Deposit was included into L1 batch ${depositBatchNumber}`);
+        expect(depositBatchNumber).to.be.greaterThan(initialL1BatchNumber);
+        return depositBatchNumber;
+    }
+}
+
+export class NodeSpawner {
+    public constructor(
+        private readonly pathToHome: string,
+        private readonly logs: fs.FileHandle,
+        private readonly fileConfig: FileConfig,
+        private readonly options: MainNodeSpawnOptions,
+        private readonly env?: ProcessEnvOptions['env']
+    ) {}
+
+    public async spawnMainNode(enableExecute: boolean): Promise<Node<NodeType.MAIN>> {
+        const env = this.env ?? process.env;
+        env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = enableExecute ? '1' : '10000';
+        // Set full mode for the Merkle tree as it is required to get blocks committed.
+        env.DATABASE_MERKLE_TREE_MODE = 'full';
+
+        const { fileConfig, pathToHome, options, logs } = this;
+
+        if (fileConfig.loadFromFile) {
+            replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, enableExecute ? 1 : 10000);
+        }
+
+        let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads';
+        if (options.enableConsensus) {
+            components += ',consensus';
+        }
+        if (options.baseTokenAddress != zksync.utils.LEGACY_ETH_ADDRESS) {
+            components += ',base_token_ratio_persister';
+        }
+        let proc = runServerInBackground({
+            components: [components],
+            stdio: ['ignore', logs, logs],
+            cwd: pathToHome,
+            env: env,
+            useZkInception: fileConfig.loadFromFile,
+            chain: fileConfig.chain
+        });
+
+        // Wait until the main node starts responding.
+        const tester = await Tester.init(
+            options.ethClientWeb3Url,
+            options.apiWeb3JsonRpcHttpUrl,
+            options.baseTokenAddress
+        );
+        await waitForNodeToStart(tester, proc, options.apiWeb3JsonRpcHttpUrl);
+        return new Node(tester, proc, NodeType.MAIN);
+    }
+
+    public async spawnExtNode(): Promise<Node<NodeType.EXT>> {
+        const env = this.env ?? process.env;
+        const { pathToHome, fileConfig, logs, options } = this;
+
+        let args = []; // FIXME: unused
+        if (options.enableConsensus) {
+            args.push('--enable-consensus');
+        }
+
+        // Run server in background.
+ let proc = runExternalNodeInBackground({ + stdio: ['ignore', logs, logs], + cwd: pathToHome, + env, + useZkInception: fileConfig.loadFromFile, + chain: fileConfig.chain + }); + + const tester = await Tester.init( + options.ethClientWeb3Url, + options.apiWeb3JsonRpcHttpUrl, + options.baseTokenAddress + ); + await waitForNodeToStart(tester, proc, options.apiWeb3JsonRpcHttpUrl); + return new Node(tester, proc, NodeType.EXT); + } +} + +async function waitForNodeToStart(tester: Tester, proc: ChildProcessWithoutNullStreams, l2Url: string) { + while (true) { + try { + const blockNumber = await tester.syncWallet.provider.getBlockNumber(); + console.log(`Initialized node API on ${l2Url}; latest block: ${blockNumber}`); + break; + } catch (err) { + if (proc.exitCode != null) { + assert.fail(`server failed to start, exitCode = ${proc.exitCode}`); + } + console.log(`Node waiting for API on ${l2Url}`); + await utils.sleep(1); + } + } +} + +export async function waitToExecuteBatch(mainContract: IZkSyncHyperchain, latestBatch: number) { + let tryCount = 0; + const initialExecutedBatch = await mainContract.getTotalBatchesExecuted(); + console.log(`Initial executed L1 batch: ${initialExecutedBatch}`); + + if (initialExecutedBatch >= latestBatch) { + console.log('Latest batch is executed; no need to wait'); + return; + } + + let lastExecutedBatch; + while ( + (lastExecutedBatch = await mainContract.getTotalBatchesExecuted()) === initialExecutedBatch && + tryCount < 100 + ) { + console.log(`Last executed batch: ${lastExecutedBatch}`); + tryCount++; + await utils.sleep(1); + } + assert(lastExecutedBatch > initialExecutedBatch); +} + +export async function waitToCommitBatchesWithoutExecution(mainContract: IZkSyncHyperchain): Promise { + let batchesCommitted = await mainContract.getTotalBatchesCommitted(); + let batchesExecuted = await mainContract.getTotalBatchesExecuted(); + console.log(`Batches committed: ${batchesCommitted}, executed: ${batchesExecuted}`); + + let tryCount = 0; + while ((batchesExecuted === 0n || batchesCommitted === batchesExecuted) && tryCount < 100) { + await utils.sleep(1); + batchesCommitted = await mainContract.getTotalBatchesCommitted(); + batchesExecuted = await mainContract.getTotalBatchesExecuted(); + console.log(`Batches committed: ${batchesCommitted}, executed: ${batchesExecuted}`); + tryCount += 1; + } + expect(batchesCommitted > batchesExecuted, 'There is no committed but not executed batch').to.be.true; + return batchesCommitted; +} + +export async function executeDepositAfterRevert(tester: Tester, wallet: zksync.Wallet, amount: bigint) { + const depositHandle = await tester.syncWallet.deposit({ + token: tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, + amount, + to: wallet.address, + approveBaseERC20: true, + approveERC20: true + }); + + let l1TxResponse = await wallet._providerL1().getTransaction(depositHandle.hash); + while (!l1TxResponse) { + console.log(`Deposit ${depositHandle.hash} is not visible to the L1 network; sleeping`); + await utils.sleep(1); + l1TxResponse = await wallet._providerL1().getTransaction(depositHandle.hash); + } + console.log(`Got L1 deposit tx`, l1TxResponse); + + // ethers doesn't work well with block reversions, so wait for the receipt before calling `.waitFinalize()`. 
+    const l2Tx = await wallet._providerL2().getL2TransactionFromPriorityOp(l1TxResponse);
+    let receipt = null;
+    while (receipt === null) {
+        console.log(`L2 deposit transaction ${l2Tx.hash} is not confirmed; sleeping`);
+        await utils.sleep(1);
+        receipt = await tester.syncWallet.provider.getTransactionReceipt(l2Tx.hash);
+    }
+    expect(receipt.status).to.be.eql(1);
+    console.log(`L2 deposit transaction ${l2Tx.hash} is confirmed`);
+
+    await depositHandle.waitFinalize();
+    console.log('New deposit is finalized');
+}
+
+export async function checkRandomTransfer(sender: zksync.Wallet, amount: bigint) {
+    const senderBalanceBefore = await sender.getBalance();
+    console.log(`Sender's balance before transfer: ${senderBalanceBefore}`);
+
+    const receiverHD = zksync.Wallet.createRandom();
+    const receiver = new zksync.Wallet(receiverHD.privateKey, sender.provider);
+    const transferHandle = await sender.sendTransaction({
+        to: receiver.address,
+        value: amount,
+        type: 0
+    });
+
+    // ethers doesn't work well with block reversions, so we poll for the receipt manually.
+    let txReceipt = null;
+    while (txReceipt === null) {
+        console.log(`Transfer ${transferHandle.hash} is not confirmed, sleeping`);
+        await utils.sleep(1);
+        txReceipt = await sender.provider.getTransactionReceipt(transferHandle.hash);
+    }
+
+    const senderBalance = await sender.getBalance();
+    console.log(`Sender's balance after transfer: ${senderBalance}`);
+    const receiverBalance = await receiver.getBalance();
+    console.log(`Receiver's balance after transfer: ${receiverBalance}`);
+
+    assert(receiverBalance === amount, 'Failed to update the balance of the receiver');
+
+    const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! + amount;
+    console.log(`Expected spent amount: ${spentAmount}`);
+    assert(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender');
+}
diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs
index e259ce209c63..28e3d609e63d 100644
--- a/core/tests/test_account/src/lib.rs
+++ b/core/tests/test_account/src/lib.rs
@@ -89,7 +89,7 @@ impl Account {
 
     pub fn default_fee() -> Fee {
         Fee {
-            gas_limit: U256::from(2000000000u32),
+            gas_limit: U256::from(2_000_000_000u32),
             max_fee_per_gas: U256::from(BASE_FEE),
             max_priority_fee_per_gas: U256::from(100),
             gas_per_pubdata_limit: U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE),
diff --git a/core/tests/ts-integration/jest.config.json b/core/tests/ts-integration/jest.config.json
index 109e7a1e008a..8fa5ea1eb721 100644
--- a/core/tests/ts-integration/jest.config.json
+++ b/core/tests/ts-integration/jest.config.json
@@ -1,20 +1,21 @@
 {
-  "reporters": [
-    "default",
-    "github-actions"
-  ],
-  "transform": {
-    "^.+\\.ts?$": "ts-jest"
-  },
-  "//": "!!! Do not increase the test timeout blindly!!!",
-  "//": "Timeout is set to match ~4 L1 operations with 10 blocks confirmation",
-  "//": "If you need bigger timeout, consider either disabling the test outside of fast mode or increasing timeout on a single test",
-  "//": "If this value would be too big, it may cause tests on stage to get stuck for too long",
-  "testTimeout": 605000,
-  "globalSetup": "<rootDir>/src/jest-setup/global-setup.ts",
-  "globalTeardown": "<rootDir>/src/jest-setup/global-teardown.ts",
-  "setupFilesAfterEnv": [
-    "<rootDir>/src/jest-setup/add-matchers.ts"
-  ],
-  "slowTestThreshold": 120
+    "maxWorkers": "70%",
+    "reporters": [
+        "default",
+        "github-actions"
+    ],
+    "transform": {
+        "^.+\\.ts?$": "ts-jest"
+    },
+    "//": "!!! Do not increase the test timeout blindly!!!",
+    "//": "Timeout is set to match ~4 L1 operations with 10 blocks confirmation",
+    "//": "If you need bigger timeout, consider either disabling the test outside of fast mode or increasing timeout on a single test",
+    "//": "If this value would be too big, it may cause tests on stage to get stuck for too long",
+    "testTimeout": 605000,
+    "globalSetup": "<rootDir>/src/jest-setup/global-setup.ts",
+    "globalTeardown": "<rootDir>/src/jest-setup/global-teardown.ts",
+    "setupFilesAfterEnv": [
+        "<rootDir>/src/jest-setup/add-matchers.ts"
+    ],
+    "slowTestThreshold": 120
 }
diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json
index 03bd84bb3f48..0e9b863d8e16 100644
--- a/core/tests/ts-integration/package.json
+++ b/core/tests/ts-integration/package.json
@@ -4,7 +4,7 @@
     "license": "MIT",
     "private": true,
     "scripts": {
-        "test": "zk f jest --forceExit --testTimeout 60000",
+        "test": "zk f jest --forceExit --verbose --testTimeout 120000",
         "long-running-test": "zk f jest",
         "fee-test": "RUN_FEE_TEST=1 zk f jest -- fees.test.ts",
         "api-test": "zk f jest -- api/web3.test.ts api/debug.test.ts",
diff --git a/core/tests/ts-integration/src/context-owner.ts b/core/tests/ts-integration/src/context-owner.ts
index 6cc2bed0a8dd..71c8227af2c5 100644
--- a/core/tests/ts-integration/src/context-owner.ts
+++ b/core/tests/ts-integration/src/context-owner.ts
@@ -553,7 +553,6 @@ export class TestContextOwner {
                 break;
             }
             const lastNodeBatch = await this.l2Provider.getL1BatchNumber();
-            this.reporter.debug(`VM playground progress: L1 batch #${lastProcessedBatch} / ${lastNodeBatch}`);
 
             if (lastProcessedBatch >= lastNodeBatch) {
                 break;
@@ -581,7 +580,7 @@ export class TestContextOwner {
             };
         }
 
-        const healthcheckPort = process.env.API_HEALTHCHECK_PORT ?? '3071';
+        const healthcheckPort = this.env.healthcheckPort;
         const nodeHealth = (await (await fetch(`http://127.0.0.1:${healthcheckPort}/health`)).json()) as NodeHealth;
         const playgroundHealth = nodeHealth.components.vm_playground;
         if (playgroundHealth === undefined) {
@@ -606,7 +605,7 @@ export class TestContextOwner {
         // Reset the reporter context.
         this.reporter = new Reporter();
         try {
-            if (this.env.nodeMode == NodeMode.Main && this.env.network === 'localhost') {
+            if (this.env.nodeMode == NodeMode.Main && this.env.network.toLowerCase() === 'localhost') {
                 // Check that the VM execution hasn't diverged using the VM playground. The component and thus the main node
                 // will crash on divergence, so we just need to make sure that the test doesn't exit before the VM playground
                 // processes all batches on the node.
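
[Editor's note] The hunk above moves the healthcheck port from `process.env.API_HEALTHCHECK_PORT` into the test environment (`this.env.healthcheckPort`, populated in env.ts below). A minimal sketch of how a consumer might poll that endpoint, assuming a payload with a `components.vm_playground` entry as read by the fetch call above; the `status` field and the `'ready'` value are assumptions, not taken from this diff:

// Sketch only. `HealthPayload` mirrors what context-owner.ts reads from /health;
// everything beyond `components.vm_playground` is a hypothetical shape.
interface HealthPayload {
    components: {
        vm_playground?: { status: string };
    };
}

async function waitForVmPlayground(healthcheckPort: string): Promise<void> {
    while (true) {
        // Node 18+ global fetch; the port comes from the new TestEnvironment field.
        const response = await fetch(`http://127.0.0.1:${healthcheckPort}/health`);
        const health = (await response.json()) as HealthPayload;
        const playground = health.components.vm_playground;
        // The component is absent until the playground starts reporting.
        if (playground && playground.status === 'ready') {
            return;
        }
        await new Promise((resolve) => setTimeout(resolve, 1000));
    }
}
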
diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index 8f6ff12224b4..ffef0fce5ce3 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -43,13 +43,17 @@ export async function waitForServer(l2NodeUrl: string) { throw new Error('Failed to wait for the server to start'); } -function getMainWalletPk(pathToHome: string, network: string): string { - if (network.toLowerCase() == 'localhost') { +function getMainWalletPk(pathToHome: string): string { + if (process.env.MASTER_WALLET_PK) { + return process.env.MASTER_WALLET_PK; + } else { const testConfigPath = path.join(pathToHome, `etc/test_config/constant`); const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); - return ethers.Wallet.fromPhrase(ethTestConfig.test_mnemonic).privateKey; - } else { - return ensureVariable(process.env.MASTER_WALLET_PK, 'Main wallet private key'); + + let pk = ethers.Wallet.fromPhrase(ethTestConfig['test_mnemonic']).privateKey; + process.env.MASTER_WALLET_PK = pk; + + return pk; } } @@ -73,7 +77,8 @@ async function loadTestEnvironmentFromFile(chain: string): Promise { const network = process.env.CHAIN_ETH_NETWORK || 'localhost'; const pathToHome = path.join(__dirname, '../../../../'); - let mainWalletPK = getMainWalletPk(pathToHome, network); + let mainWalletPK = getMainWalletPk(pathToHome); const l2NodeUrl = ensureVariable( process.env.ZKSYNC_WEB3_API_URL || process.env.API_WEB3_JSON_RPC_HTTP_URL, @@ -237,6 +244,7 @@ export async function loadTestEnvironmentFromEnv(): Promise { process.env.EN_REQ_ENTITIES_LIMIT ?? process.env.API_WEB3_JSON_RPC_REQ_ENTITIES_LIMIT! ); + const healthcheckPort = process.env.API_HEALTHCHECK_PORT ?? '3071'; return { maxLogsLimit, pathToHome, @@ -251,6 +259,7 @@ export async function loadTestEnvironmentFromEnv(): Promise { l2NodeUrl, l1NodeUrl, wsL2NodeUrl, + healthcheckPort, contractVerificationUrl, erc20Token: { name: token.name, diff --git a/core/tests/ts-integration/src/types.ts b/core/tests/ts-integration/src/types.ts index 415a8519a1b4..4975b7b612cf 100644 --- a/core/tests/ts-integration/src/types.ts +++ b/core/tests/ts-integration/src/types.ts @@ -89,6 +89,7 @@ export interface TestEnvironment { * Description of the "base" ERC20 token used in the tests. */ baseToken: Token; + healthcheckPort: string; } /** diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index b20e9d1e37d3..79789e744471 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -202,7 +202,7 @@ describe('web3 API compatibility tests', () => { test('Should test web3 response extensions', async () => { if (testMaster.isFastMode()) { - // This test requires a new L1 batch to be created, which may be very time consuming on stage. + // This test requires a new L1 batch to be created, which may be very time-consuming on stage. return; } @@ -333,7 +333,7 @@ describe('web3 API compatibility tests', () => { // Pubsub notifier is not reactive + tests are being run in parallel, so we can't expect that the next block // would be expected one. Instead, we just want to receive an event with the particular block number. - wsProvider.on('block', (block) => { + await wsProvider.on('block', (block) => { if (block >= currentBlock) { newBlock = block; } @@ -355,7 +355,6 @@ describe('web3 API compatibility tests', () => { // ...though the gap should not be *too* big. 
expect(newBlock).toBeLessThan(currentBlock + 100); await tx.wait(); // To not leave a hanging promise. - wsProvider.removeAllListeners(); await wsProvider.destroy(); }); @@ -368,7 +367,7 @@ describe('web3 API compatibility tests', () => { let newTxHash: string | null = null; // We can't use `once` as there may be other pending txs sent together with our one. - wsProvider.on('pending', async (txHash) => { + await wsProvider.on('pending', async (txHash) => { const tx = await alice.provider.getTransaction(txHash); // We're waiting for the exact transaction to appear. if (!tx || tx.to != uniqueRecipient) { @@ -392,7 +391,6 @@ describe('web3 API compatibility tests', () => { expect(newTxHash as string).toEqual(tx.hash); await tx.wait(); // To not leave a hanging promise. - wsProvider.removeAllListeners(); await wsProvider.destroy(); }); @@ -404,7 +402,7 @@ describe('web3 API compatibility tests', () => { // We're sending a few transfers from the wallet, so we'll use a new account to make event unique. let uniqueRecipient = testMaster.newEmptyAccount().address; - // Setup a filter for an ERC20 transfer. + // Set up a filter for an ERC20 transfer. const erc20TransferTopic = ethers.id('Transfer(address,address,uint256)'); let filter = { address: l2Token, @@ -414,15 +412,15 @@ describe('web3 API compatibility tests', () => { ethers.zeroPadValue(uniqueRecipient, 32) // Recipient ] }; - wsProvider.once(filter, (event) => { + await wsProvider.once(filter, (event) => { newEvent = event; }); - // Setup a filter that should not match anything. + // Set up a filter that should not match anything. let incorrectFilter = { address: alice.address }; - wsProvider.once(incorrectFilter, (_) => { + await wsProvider.once(incorrectFilter, (_) => { expect(null).fail('Found log for incorrect filter'); }); @@ -439,7 +437,6 @@ describe('web3 API compatibility tests', () => { expect((newEvent as any).transactionHash).toEqual(tx.hash); await tx.wait(); // To not leave a hanging promise. - wsProvider.removeAllListeners(); await wsProvider.destroy(); }); @@ -608,7 +605,7 @@ describe('web3 API compatibility tests', () => { // Pubsub notify is not reactive and may be laggy, so we want to increase the chances // for test to pass. So we try to sleep a few iterations until we receive expected amount - // of events. If we won't receive them, we continue and the test will fail anyway. + // of events. If we don't receive them, we continue and the test will fail anyway. const expectedTrivialEventsCount = 2; const expectedSimpleEventsCount = 2; const expectedIndexedEventsCount = 1; @@ -681,42 +678,9 @@ describe('web3 API compatibility tests', () => { ).resolves.toHaveProperty('result', expect.stringMatching(HEX_VALUE_REGEX)); }); - test('Should check API returns error when there are too many logs in eth_getLogs', async () => { - const contract = await deployContract(alice, contracts.events, []); - const maxLogsLimit = testMaster.environment().maxLogsLimit; - - // Send 3 transactions that emit `maxLogsLimit / 2` events. - const tx1 = await contract.emitManyEvents(maxLogsLimit / 2); - const tx1Receipt = await tx1.wait(); - - const tx2 = await contract.emitManyEvents(maxLogsLimit / 2); - await tx2.wait(); - - const tx3 = await contract.emitManyEvents(maxLogsLimit / 2); - const tx3Receipt = await tx3.wait(); - - // There are around `0.5 * maxLogsLimit` logs in [tx1Receipt.blockNumber, tx1Receipt.blockNumber] range, - // so query with such filter should succeed. 
- await expect( - alice.provider.getLogs({ - fromBlock: tx1Receipt.blockNumber, - toBlock: tx1Receipt.blockNumber - }) - ).resolves; - - // There are at least `1.5 * maxLogsLimit` logs in [tx1Receipt.blockNumber, tx3Receipt.blockNumber] range, - // so query with such filter should fail. - await expect( - alice.provider.getLogs({ - fromBlock: tx1Receipt.blockNumber, - toBlock: tx3Receipt.blockNumber - }) - ).rejects.toThrow(`Query returned more than ${maxLogsLimit} results.`); - }); - test('Should throw error for estimate gas for account with balance < tx.value', async () => { let poorBob = testMaster.newEmptyAccount(); - expect( + await expect( poorBob.estimateGas({ value: 1, to: alice.address }) ).toBeRejected(/*'insufficient balance for transfer'*/); }); @@ -860,7 +824,7 @@ describe('web3 API compatibility tests', () => { const getLogsByHash = (await alice.provider.getLogs({ blockHash: latestBlock.hash || undefined })).map((x) => { return new zksync.types.Log({ ...x, l1BatchNumber: 0 }, alice.provider); // Set bogus value. }); - await expect(getLogsByNumber).toEqual(getLogsByHash); + expect(getLogsByNumber).toEqual(getLogsByHash); // Check that incorrect queries are rejected. await expect( @@ -1030,7 +994,7 @@ describe('web3 API compatibility tests', () => { const incrementFunctionData = contract2.interface.encodeFunctionData('increment', [1]); // Assert that the estimation fails because the increment function is not present in contract1 - expect( + await expect( alice.provider.estimateGas({ to: contract1Address.toString(), data: incrementFunctionData diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts index adb32def5b07..70df1dff9282 100644 --- a/core/tests/ts-integration/tests/base-token.test.ts +++ b/core/tests/ts-integration/tests/base-token.test.ts @@ -9,6 +9,9 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { scaledGasPrice } from '../src/helpers'; +const SECONDS = 1000; +jest.setTimeout(100 * SECONDS); + describe('base ERC20 contract checks', () => { let testMaster: TestMaster; let alice: zksync.Wallet; diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts index 8b0bd347ce78..cb1bec35b51b 100644 --- a/core/tests/ts-integration/tests/contracts.test.ts +++ b/core/tests/ts-integration/tests/contracts.test.ts @@ -15,6 +15,9 @@ import * as zksync from 'zksync-ethers'; import * as elliptic from 'elliptic'; import { RetryProvider } from '../src/retry-provider'; +const SECONDS = 1000; +jest.setTimeout(300 * SECONDS); + // TODO: Leave only important ones. const contracts = { counter: getTestContract('Counter'), @@ -35,6 +38,7 @@ describe('Smart contract behavior checks', () => { // Contracts shared in several tests. let counterContract: zksync.Contract; + let expensiveContract: zksync.Contract; beforeAll(() => { testMaster = TestMaster.getInstance(__filename); @@ -71,12 +75,13 @@ describe('Smart contract behavior checks', () => { }); test('Should perform "expensive" contract calls', async () => { - const expensiveContract = await deployContract(alice, contracts.expensive, []); - - // First, check that the transaction that is too expensive would be rejected by the API server. + expensiveContract = await deployContract(alice, contracts.expensive, []); + // Check that the transaction that is too expensive would be rejected by the API server. 
await expect(expensiveContract.expensive(15000)).toBeRejected(); + }); - // Second, check that processable transaction may fail with "out of gas" error. + test('Should perform underpriced "expensive" contract calls', async () => { + // Check that processable transaction may fail with "out of gas" error. // To do so, we estimate gas for arg "1" and supply it to arg "20". // This guarantees that transaction won't fail during verification. const lowGasLimit = await expensiveContract.expensive.estimateGas(1); diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index abeaa4e27553..2e223b9d7441 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -89,25 +89,29 @@ describe('Upgrade test', function () { alice = tester.emptyWallet(); if (fileConfig.loadFromFile) { - let walletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'wallets.yaml' }); + const chainWalletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'wallets.yaml' }); - adminGovWallet = new ethers.Wallet(walletConfig.governor.private_key, alice._providerL1()); + adminGovWallet = new ethers.Wallet(chainWalletConfig.governor.private_key, alice._providerL1()); - walletConfig = loadConfig({ + const ecosystemWalletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, configsFolder: '../../configs/', config: 'wallets.yaml' }); - ecosystemGovWallet = new ethers.Wallet(walletConfig.governor.private_key, alice._providerL1()); + if (ecosystemWalletConfig.governor.private_key == chainWalletConfig.governor.private_key) { + ecosystemGovWallet = adminGovWallet; + } else { + ecosystemGovWallet = new ethers.Wallet(ecosystemWalletConfig.governor.private_key, alice._providerL1()); + } } else { let govMnemonic = ethers.Mnemonic.fromPhrase( require('../../../../etc/test_config/constant/eth.json').mnemonic ); let govWalletHD = ethers.HDNodeWallet.fromMnemonic(govMnemonic, "m/44'/60'/0'/0/1"); adminGovWallet = new ethers.Wallet(govWalletHD.privateKey, alice._providerL1()); - ecosystemGovWallet = new ethers.Wallet(govWalletHD.privateKey, alice._providerL1()); + ecosystemGovWallet = adminGovWallet; } logs = fs.createWriteStream('upgrade.log', { flags: 'a' }); @@ -138,9 +142,10 @@ describe('Upgrade test', function () { // Run server in background. runServerInBackground({ components: serverComponents, - stdio: [null, logs, logs], + stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile + useZkInception: fileConfig.loadFromFile, + chain: fileConfig.chain }); // Server may need some time to recompile if it's a cold run, so wait for it. let iter = 0; @@ -279,9 +284,11 @@ describe('Upgrade test', function () { ); executeOperation = chainUpgradeCalldata; + console.log('Sending scheduleTransparentOperation'); await sendGovernanceOperation(stmUpgradeData.scheduleTransparentOperation); + console.log('Sending executeOperation'); await sendGovernanceOperation(stmUpgradeData.executeOperation); - + console.log('Sending chain admin operation'); await sendChainAdminOperation(setTimestampCalldata); // Wait for server to process L1 event. @@ -345,9 +352,10 @@ describe('Upgrade test', function () { // Run again. 
runServerInBackground({ components: serverComponents, - stdio: [null, logs, logs], + stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile + useZkInception: fileConfig.loadFromFile, + chain: fileConfig.chain }); await utils.sleep(10); @@ -369,23 +377,25 @@ describe('Upgrade test', function () { }); async function sendGovernanceOperation(data: string) { - await ( - await ecosystemGovWallet.sendTransaction({ - to: await governanceContract.getAddress(), - data: data, - type: 0 - }) - ).wait(); + const transaction = await ecosystemGovWallet.sendTransaction({ + to: await governanceContract.getAddress(), + data: data, + type: 0 + }); + console.log(`Sent governance operation, tx_hash=${transaction.hash}, nonce=${transaction.nonce}`); + await transaction.wait(); + console.log(`Governance operation succeeded, tx_hash=${transaction.hash}`); } async function sendChainAdminOperation(data: string) { - await ( - await adminGovWallet.sendTransaction({ - to: await chainAdminContract.getAddress(), - data: data, - type: 0 - }) - ).wait(); + const transaction = await adminGovWallet.sendTransaction({ + to: await chainAdminContract.getAddress(), + data: data, + type: 0 + }); + console.log(`Sent chain admin operation, tx_hash=${transaction.hash}, nonce=${transaction.nonce}`); + await transaction.wait(); + console.log(`Chain admin operation succeeded, tx_hash=${transaction.hash}`); } }); diff --git a/core/tests/upgrade-test/tests/utils.ts b/core/tests/upgrade-test/tests/utils.ts index d4a7aded4c39..7a7829caf86b 100644 --- a/core/tests/upgrade-test/tests/utils.ts +++ b/core/tests/upgrade-test/tests/utils.ts @@ -7,16 +7,23 @@ export function runServerInBackground({ components, stdio, cwd, - useZkInception + useZkInception, + chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; useZkInception?: boolean; + chain?: string; }) { - let command = useZkInception - ? 'zk_inception server' - : 'cd $ZKSYNC_HOME && cargo run --bin zksync_server --release --'; + let command = ''; + + if (useZkInception) { + command = 'zk_inception server'; + command += chain ? 
` --chain ${chain}` : ''; + } else { + command = 'cd $ZKSYNC_HOME && cargo run --bin zksync_server --release --'; + } if (components && components.length > 0) { command += ` --components=${components.join(',')}`; } diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 27218d79aafe..4586c637e128 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -6,46 +6,30 @@ license.workspace = true publish = false [dependencies] +zksync_contracts.workspace = true +zksync_multivm.workspace = true zksync_types.workspace = true +zksync_utils.workspace = true zksync_vlog.workspace = true -zksync_vm_benchmark_harness.workspace = true +criterion.workspace = true +once_cell.workspace = true rand.workspace = true vise.workspace = true tokio.workspace = true [dev-dependencies] -criterion.workspace = true +assert_matches.workspace = true iai.workspace = true [[bench]] -name = "criterion" +name = "oneshot" harness = false [[bench]] -name = "diy_benchmark" +name = "batch" harness = false [[bench]] name = "iai" harness = false - -[[bench]] -name = "fill_bootloader" -harness = false - -[[bin]] -name = "iai_results_to_prometheus" -path = "src/iai_results_to_prometheus.rs" - -[[bin]] -name = "compare_iai_results" -path = "src/compare_iai_results.rs" - -[[bin]] -name = "find-slowest" -path = "src/find_slowest.rs" - -[[bin]] -name = "instruction-counts" -path = "src/instruction_counts.rs" diff --git a/core/tests/vm-benchmark/README.md b/core/tests/vm-benchmark/README.md index cecbdb31d0cf..b7f056894e73 100644 --- a/core/tests/vm-benchmark/README.md +++ b/core/tests/vm-benchmark/README.md @@ -9,35 +9,22 @@ benchmarks, however. There are three different benchmarking tools available: ```sh -cargo bench --bench criterion -cargo bench --bench diy_benchmark +cargo bench --bench oneshot +cargo bench --bench batch cargo +nightly bench --bench iai ``` -Criterion is the de-facto microbenchmarking tool for Rust. Run it, then optimize something and run the command again to -see if your changes have made a difference. +`oneshot` and `batch` targets use Criterion, the de-facto standard micro-benchmarking tool for Rust. `oneshot` measures +VM performance on single transactions, and `batch` on entire batches of up to 5,000 transactions. Run these benches, +then optimize something and run the command again to see if your changes have made a difference. -The DIY benchmark works a bit better in noisy environments and is used to push benchmark data to Prometheus -automatically. +IAI uses cachegrind to simulate the CPU, so noise is completely irrelevant to it, but it also doesn't measure exactly +the same thing as normal benchmarks. You need valgrind to be able to run it. -IAI uses cachegrind to simulate the CPU, so noise is completely irrelevant to it but it also doesn't measure exactly the -same thing as normal benchmarks. You need valgrind to be able to run it. - -You can add your own bytecodes to be benchmarked into the folder "deployment_benchmarks". For iai, you also need to add -them to "benches/iai.rs". +You can add new bytecodes to be benchmarked into the [`bytecodes`](src/bytecodes) directory and then add them to the +`BYTECODES` constant exported by the crate. ## Profiling (Linux only) You can also use `sh perf.sh bytecode_file` to produce data that can be fed into the [firefox profiler](https://profiler.firefox.com/) for a specific bytecode. - -## Fuzzing - -There is a fuzzer using this library at core/lib/vm/fuzz. 
The fuzz.sh script located there starts a fuzzer which -attempts to make cover as much code as it can to ultimately produce a valid deployment bytecode. - -It has no chance of succeeding currently because the fuzzing speed drops to 10 executions/s easily. Optimizing the VM or -lowering the gas limit will help with that. - -The fuzzer has been useful for producing synthetic benchmark inputs. It may be a good tool for finding show transactions -with a certain gas limit, an empirical way of evaluating gas prices of instructions. diff --git a/core/tests/vm-benchmark/benches/fill_bootloader.rs b/core/tests/vm-benchmark/benches/batch.rs similarity index 79% rename from core/tests/vm-benchmark/benches/fill_bootloader.rs rename to core/tests/vm-benchmark/benches/batch.rs index 13fa1df0b2fc..608f6be6d089 100644 --- a/core/tests/vm-benchmark/benches/fill_bootloader.rs +++ b/core/tests/vm-benchmark/benches/batch.rs @@ -14,17 +14,15 @@ use std::{iter, time::Duration}; -use criterion::{ - black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, - BenchmarkId, Criterion, Throughput, -}; +use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; use rand::{rngs::StdRng, Rng, SeedableRng}; -use zksync_types::Transaction; -use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, - get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, - BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, LoadTestParams, +use vm_benchmark::{ + criterion::{is_test_mode, BenchmarkGroup, BenchmarkId, CriterionExt, MeteredTime}, + get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, get_load_test_deploy_tx, + get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, BenchmarkingVm, + BenchmarkingVmFactory, Bytecode, Fast, Legacy, LoadTestParams, }; +use zksync_types::Transaction; /// Gas limit for deployment transactions. 
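For orientation, the rewritten `batch.rs` pre-generates one deploy transaction per nonce and then executes whole batches against a single VM instance. A minimal sketch of that flow, assuming the `vm_benchmark` crate API introduced elsewhere in this diff (`Bytecode::get`, `get_deploy_tx_with_gas_limit`, `BenchmarkingVm`):

```rust
use vm_benchmark::{get_deploy_tx_with_gas_limit, BenchmarkingVm, Bytecode, Fast};

fn main() {
    // One deploy tx per nonce, mirroring how `batch.rs` fills a batch.
    let contract = Bytecode::get("deploy_simple_contract");
    let txs: Vec<_> = (0..10_u32)
        .map(|nonce| get_deploy_tx_with_gas_limit(contract.bytecode(), 30_000_000, nonce))
        .collect();

    // Execute the whole batch on a single VM instance.
    let mut vm = BenchmarkingVm::<Fast>::default();
    for tx in &txs {
        let result = vm.run_transaction(tx);
        assert!(!result.result.is_failed(), "{:?}", result.result);
    }
}
```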
const DEPLOY_GAS_LIMIT: u32 = 30_000_000; @@ -59,7 +57,7 @@ fn bench_vm( } fn run_vm_expecting_failures( - group: &mut BenchmarkGroup<'_, WallTime>, + group: &mut BenchmarkGroup<'_>, name: &str, txs: &[Transaction], expected_failures: &[bool], @@ -70,25 +68,24 @@ fn run_vm_expecting_failures( } group.throughput(Throughput::Elements(*txs_in_batch as u64)); - group.bench_with_input( + group.bench_metered_with_input( BenchmarkId::new(name, txs_in_batch), txs_in_batch, |bencher, &txs_in_batch| { if FULL { // Include VM initialization / drop into the measured time - bencher.iter(|| { + bencher.iter(|timer| { + let _guard = timer.start(); let mut vm = BenchmarkingVm::::default(); bench_vm::<_, true>(&mut vm, &txs[..txs_in_batch], expected_failures); }); } else { - bencher.iter_batched( - BenchmarkingVm::::default, - |mut vm| { - bench_vm::<_, false>(&mut vm, &txs[..txs_in_batch], expected_failures); - vm - }, - BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one - ); + bencher.iter(|timer| { + let mut vm = BenchmarkingVm::::default(); + let guard = timer.start(); + bench_vm::<_, false>(&mut vm, &txs[..txs_in_batch], expected_failures); + drop(guard); + }); } }, ); @@ -96,22 +93,23 @@ fn run_vm_expecting_failures( } fn run_vm( - group: &mut BenchmarkGroup<'_, WallTime>, + group: &mut BenchmarkGroup<'_>, name: &str, txs: &[Transaction], ) { run_vm_expecting_failures::(group, name, txs, &[]); } -fn bench_fill_bootloader(c: &mut Criterion) { - let is_test_mode = !std::env::args().any(|arg| arg == "--bench"); - let txs_in_batch = if is_test_mode { +fn bench_fill_bootloader( + c: &mut Criterion, +) { + let txs_in_batch = if is_test_mode() { &TXS_IN_BATCH[..3] // Reduce the number of transactions in a batch so that tests don't take long } else { TXS_IN_BATCH }; - let mut group = c.benchmark_group(if FULL { + let mut group = c.metered_group(if FULL { format!("fill_bootloader_full{}", VM::LABEL.as_suffix()) } else { format!("fill_bootloader{}", VM::LABEL.as_suffix()) @@ -121,12 +119,12 @@ fn bench_fill_bootloader(c: &mut Cr .measurement_time(Duration::from_secs(10)); // Deploying simple contract - let test_contract = - std::fs::read("deployment_benchmarks/deploy_simple_contract").expect("failed to read file"); - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); + let test_contract = Bytecode::get("deploy_simple_contract"); let max_txs = *txs_in_batch.last().unwrap() as u32; let txs: Vec<_> = (0..max_txs) - .map(|nonce| get_deploy_tx_with_gas_limit(code, DEPLOY_GAS_LIMIT, nonce)) + .map(|nonce| { + get_deploy_tx_with_gas_limit(test_contract.bytecode(), DEPLOY_GAS_LIMIT, nonce) + }) .collect(); run_vm::(&mut group, "deploy_simple_contract", &txs); drop(txs); @@ -187,9 +185,12 @@ fn bench_fill_bootloader(c: &mut Cr } criterion_group!( - benches, - bench_fill_bootloader::, - bench_fill_bootloader::, - bench_fill_bootloader:: + name = benches; + config = Criterion::default() + .configure_from_args() + .with_measurement(MeteredTime::new("fill_bootloader")); + targets = bench_fill_bootloader::, + bench_fill_bootloader::, + bench_fill_bootloader:: ); criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/criterion.rs b/core/tests/vm-benchmark/benches/criterion.rs deleted file mode 100644 index 9e12fc25f54c..000000000000 --- a/core/tests/vm-benchmark/benches/criterion.rs +++ /dev/null @@ -1,98 +0,0 @@ -use std::time::Duration; - -use criterion::{ - black_box, criterion_group, criterion_main, measurement::WallTime, BatchSize, BenchmarkGroup, - 
Criterion, -}; -use zksync_types::Transaction; -use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, - get_load_test_tx, get_realistic_load_test_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, - Legacy, LoadTestParams, -}; - -const SAMPLE_SIZE: usize = 20; - -fn benches_in_folder(c: &mut Criterion) { - let mut group = c.benchmark_group(VM::LABEL.as_str()); - group - .sample_size(SAMPLE_SIZE) - .measurement_time(Duration::from_secs(10)); - - for path in std::fs::read_dir("deployment_benchmarks").unwrap() { - let path = path.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - let file_name = path.file_name().unwrap().to_str().unwrap(); - let full_suffix = if FULL { "/full" } else { "" }; - let bench_name = format!("{file_name}{full_suffix}"); - group.bench_function(bench_name, |bencher| { - if FULL { - // Include VM initialization / drop into the measured time - bencher.iter(|| BenchmarkingVm::::default().run_transaction(black_box(&tx))); - } else { - bencher.iter_batched( - BenchmarkingVm::::default, - |mut vm| { - let result = vm.run_transaction(black_box(&tx)); - (vm, result) - }, - BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one - ); - } - }); - } -} - -fn bench_load_test(c: &mut Criterion) { - let mut group = c.benchmark_group(VM::LABEL.as_str()); - group - .sample_size(SAMPLE_SIZE) - .measurement_time(Duration::from_secs(10)); - - // Nonce 0 is used for the deployment transaction - let tx = get_load_test_tx(1, 10_000_000, LoadTestParams::default()); - bench_load_test_transaction::(&mut group, "load_test", &tx); - - let tx = get_realistic_load_test_tx(1); - bench_load_test_transaction::(&mut group, "load_test_realistic", &tx); - - let tx = get_heavy_load_test_tx(1); - bench_load_test_transaction::(&mut group, "load_test_heavy", &tx); -} - -fn bench_load_test_transaction( - group: &mut BenchmarkGroup<'_, WallTime>, - name: &str, - tx: &Transaction, -) { - group.bench_function(name, |bencher| { - bencher.iter_batched( - || { - let mut vm = BenchmarkingVm::::default(); - vm.run_transaction(&get_load_test_deploy_tx()); - vm - }, - |mut vm| { - let result = vm.run_transaction(black_box(tx)); - assert!(!result.result.is_failed(), "{:?}", result.result); - (vm, result) - }, - BatchSize::LargeInput, - ); - }); -} - -criterion_group!( - benches, - benches_in_folder::, - benches_in_folder::, - benches_in_folder::, - benches_in_folder::, - bench_load_test::, - bench_load_test:: -); -criterion_main!(benches); diff --git a/core/tests/vm-benchmark/benches/diy_benchmark.rs b/core/tests/vm-benchmark/benches/diy_benchmark.rs deleted file mode 100644 index 1601de5eb85f..000000000000 --- a/core/tests/vm-benchmark/benches/diy_benchmark.rs +++ /dev/null @@ -1,53 +0,0 @@ -use std::time::{Duration, Instant}; - -use criterion::black_box; -use vise::{Gauge, LabeledFamily, Metrics}; -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; - -fn main() { - let mut results = vec![]; - - for path in std::fs::read_dir("deployment_benchmarks").unwrap() { - let path = path.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - - let name = 
path.file_name().unwrap().to_str().unwrap(); - - println!("benchmarking: {}", name); - - let mut timings = vec![]; - let benchmark_start = Instant::now(); - while benchmark_start.elapsed() < Duration::from_secs(5) { - let start = Instant::now(); - BenchmarkingVm::new().run_transaction(black_box(&tx)); - timings.push(start.elapsed()); - } - - println!("{:?}", timings.iter().min().unwrap()); - results.push((name.to_owned(), timings)); - } - - if option_env!("PUSH_VM_BENCHMARKS_TO_PROMETHEUS").is_some() { - vm_benchmark::with_prometheus::with_prometheus(|| { - for (name, timings) in results { - for (i, timing) in timings.into_iter().enumerate() { - VM_BENCHMARK_METRICS.timing[&(name.clone(), i.to_string())].set(timing); - } - } - }); - } -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_benchmark")] -pub(crate) struct VmBenchmarkMetrics { - #[metrics(labels = ["benchmark", "run_no"])] - pub timing: LabeledFamily<(String, String), Gauge, 2>, -} - -#[vise::register] -pub(crate) static VM_BENCHMARK_METRICS: vise::Global = vise::Global::new(); diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index 2837a2345a5a..6b8965afa4f1 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -1,14 +1,8 @@ use iai::black_box; -use zksync_vm_benchmark_harness::{ - cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, - Legacy, -}; - -fn run_bytecode(path: &str) { - let test_contract = std::fs::read(path).expect("failed to read file"); - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); +use vm_benchmark::{BenchmarkingVm, BenchmarkingVmFactory, Bytecode, Fast, Legacy}; +fn run_bytecode(name: &str) { + let tx = Bytecode::get(name).deploy_tx(); black_box(BenchmarkingVm::::default().run_transaction(&tx)); } @@ -16,11 +10,11 @@ macro_rules! 
make_functions_and_main { ($($file:ident => $legacy_name:ident,)+) => { $( fn $file() { - run_bytecode::(concat!("deployment_benchmarks/", stringify!($file))); + run_bytecode::(stringify!($file)); } fn $legacy_name() { - run_bytecode::(concat!("deployment_benchmarks/", stringify!($file))); + run_bytecode::(stringify!($file)); } )+ diff --git a/core/tests/vm-benchmark/benches/oneshot.rs b/core/tests/vm-benchmark/benches/oneshot.rs new file mode 100644 index 000000000000..58a90af4981f --- /dev/null +++ b/core/tests/vm-benchmark/benches/oneshot.rs @@ -0,0 +1,91 @@ +use std::time::Duration; + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use vm_benchmark::{ + criterion::{BenchmarkGroup, CriterionExt, MeteredTime}, + get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, + BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, LoadTestParams, BYTECODES, +}; +use zksync_types::Transaction; + +const SAMPLE_SIZE: usize = 20; + +fn benches_in_folder(c: &mut Criterion) { + let mut group = c.metered_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); + + for bytecode in BYTECODES { + let tx = bytecode.deploy_tx(); + let bench_name = bytecode.name; + let full_suffix = if FULL { "/full" } else { "" }; + let bench_name = format!("{bench_name}{full_suffix}"); + + group.bench_metered(bench_name, |bencher| { + if FULL { + // Include VM initialization / drop into the measured time + bencher.iter(|timer| { + let _guard = timer.start(); + BenchmarkingVm::::default().run_transaction(black_box(&tx)); + }); + } else { + bencher.iter(|timer| { + let mut vm = BenchmarkingVm::::default(); + let guard = timer.start(); + let _result = vm.run_transaction(black_box(&tx)); + drop(guard); // do not include latency of dropping `_result` + }); + } + }); + } +} + +fn bench_load_test(c: &mut Criterion) { + let mut group = c.metered_group(VM::LABEL.as_str()); + group + .sample_size(SAMPLE_SIZE) + .measurement_time(Duration::from_secs(10)); + + // Nonce 0 is used for the deployment transaction + let tx = get_load_test_tx(1, 10_000_000, LoadTestParams::default()); + bench_load_test_transaction::(&mut group, "load_test", &tx); + + let tx = get_realistic_load_test_tx(1); + bench_load_test_transaction::(&mut group, "load_test_realistic", &tx); + + let tx = get_heavy_load_test_tx(1); + bench_load_test_transaction::(&mut group, "load_test_heavy", &tx); +} + +fn bench_load_test_transaction( + group: &mut BenchmarkGroup<'_>, + name: &str, + tx: &Transaction, +) { + group.bench_metered(name, |bencher| { + bencher.iter(|timer| { + let mut vm = BenchmarkingVm::::default(); + vm.run_transaction(&get_load_test_deploy_tx()); + + let guard = timer.start(); + let result = vm.run_transaction(black_box(tx)); + drop(guard); // do not include the latency of `result` checks / drop + assert!(!result.result.is_failed(), "{:?}", result.result); + }); + }); +} + +criterion_group!( + name = benches; + config = Criterion::default() + .configure_from_args() + .with_measurement(MeteredTime::new("criterion")); + targets = benches_in_folder::, + benches_in_folder::, + benches_in_folder::, + benches_in_folder::, + bench_load_test::, + bench_load_test:: +); +criterion_main!(benches); diff --git a/core/tests/vm-benchmark/harness/Cargo.toml b/core/tests/vm-benchmark/harness/Cargo.toml deleted file mode 100644 index a24d3fa1294a..000000000000 --- a/core/tests/vm-benchmark/harness/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = 
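Note the warm-up pattern used by the load-test benches above: the loadnext contract is deployed outside the timed region, and only the subsequent call is measured. A minimal sketch, assuming the exports shown in this diff:

```rust
use vm_benchmark::{get_load_test_deploy_tx, get_realistic_load_test_tx, BenchmarkingVm, Fast};

fn main() {
    let mut vm = BenchmarkingVm::<Fast>::default();
    // Nonce 0: deployment; in the bench this happens before the timer starts.
    vm.run_transaction(&get_load_test_deploy_tx());
    // Nonce 1: the actual measured workload.
    let result = vm.run_transaction(&get_realistic_load_test_tx(1));
    assert!(!result.result.is_failed(), "{:?}", result.result);
}
```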
"zksync_vm_benchmark_harness" -version.workspace = true -edition.workspace = true -license.workspace = true -publish = false - -[dependencies] -zksync_multivm.workspace = true -zksync_types.workspace = true -zksync_state.workspace = true -zksync_utils.workspace = true -zksync_system_constants.workspace = true -zksync_contracts.workspace = true -zk_evm.workspace = true -once_cell.workspace = true - -[dev-dependencies] -assert_matches.workspace = true diff --git a/core/tests/vm-benchmark/src/parse_iai.rs b/core/tests/vm-benchmark/src/bin/common/mod.rs similarity index 98% rename from core/tests/vm-benchmark/src/parse_iai.rs rename to core/tests/vm-benchmark/src/bin/common/mod.rs index 61376b429a32..a92c9d5f710c 100644 --- a/core/tests/vm-benchmark/src/parse_iai.rs +++ b/core/tests/vm-benchmark/src/bin/common/mod.rs @@ -1,5 +1,6 @@ use std::io::BufRead; +#[derive(Debug)] pub struct IaiResult { pub name: String, pub instructions: u64, diff --git a/core/tests/vm-benchmark/src/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs similarity index 98% rename from core/tests/vm-benchmark/src/compare_iai_results.rs rename to core/tests/vm-benchmark/src/bin/compare_iai_results.rs index d2c9d73f7e36..faf72a18f451 100644 --- a/core/tests/vm-benchmark/src/compare_iai_results.rs +++ b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs @@ -4,7 +4,9 @@ use std::{ io::{BufRead, BufReader}, }; -use vm_benchmark::parse_iai::parse_iai; +pub use crate::common::parse_iai; + +mod common; fn main() { let [iai_before, iai_after, opcodes_before, opcodes_after] = std::env::args() diff --git a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs new file mode 100644 index 000000000000..3b3aa05bf69c --- /dev/null +++ b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs @@ -0,0 +1,52 @@ +use std::{env, io::BufReader, time::Duration}; + +use tokio::sync::watch; +use vise::{Gauge, LabeledFamily, Metrics}; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +use crate::common::{parse_iai, IaiResult}; + +mod common; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "vm_cachegrind")] +pub(crate) struct VmCachegrindMetrics { + #[metrics(labels = ["benchmark"])] + pub instructions: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub l1_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub l2_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub ram_accesses: LabeledFamily>, + #[metrics(labels = ["benchmark"])] + pub cycles: LabeledFamily>, +} + +#[vise::register] +pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); + +#[tokio::main] +async fn main() { + let results: Vec = parse_iai(BufReader::new(std::io::stdin())).collect(); + + let endpoint = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL") + .expect("`BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL` env var is not set"); + let (stop_sender, stop_receiver) = watch::channel(false); + let prometheus_config = + PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); + tokio::spawn(prometheus_config.run(stop_receiver)); + + for result in results { + let name = result.name; + VM_CACHEGRIND_METRICS.instructions[&name.clone()].set(result.instructions); + VM_CACHEGRIND_METRICS.l1_accesses[&name.clone()].set(result.l1_accesses); + VM_CACHEGRIND_METRICS.l2_accesses[&name.clone()].set(result.l2_accesses); + VM_CACHEGRIND_METRICS.ram_accesses[&name.clone()].set(result.ram_accesses); + 
VM_CACHEGRIND_METRICS.cycles[&name].set(result.cycles); + } + + println!("Waiting for push to happen..."); + tokio::time::sleep(Duration::from_secs(1)).await; + stop_sender.send_replace(true); +} diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs new file mode 100644 index 000000000000..f9bb04c01bff --- /dev/null +++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs @@ -0,0 +1,11 @@ +//! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. + +use vm_benchmark::{BenchmarkingVm, BYTECODES}; + +fn main() { + for bytecode in BYTECODES { + let tx = bytecode.deploy_tx(); + let name = bytecode.name; + println!("{name} {}", BenchmarkingVm::new().instruction_count(&tx)); + } +} diff --git a/core/tests/vm-benchmark/deployment_benchmarks/access_memory b/core/tests/vm-benchmark/src/bytecodes/access_memory similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/access_memory rename to core/tests/vm-benchmark/src/bytecodes/access_memory diff --git a/core/tests/vm-benchmark/deployment_benchmarks/call_far b/core/tests/vm-benchmark/src/bytecodes/call_far similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/call_far rename to core/tests/vm-benchmark/src/bytecodes/call_far diff --git a/core/tests/vm-benchmark/deployment_benchmarks/decode_shl_sub b/core/tests/vm-benchmark/src/bytecodes/decode_shl_sub similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/decode_shl_sub rename to core/tests/vm-benchmark/src/bytecodes/decode_shl_sub diff --git a/core/tests/vm-benchmark/deployment_benchmarks/deploy_simple_contract b/core/tests/vm-benchmark/src/bytecodes/deploy_simple_contract similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/deploy_simple_contract rename to core/tests/vm-benchmark/src/bytecodes/deploy_simple_contract diff --git a/core/tests/vm-benchmark/deployment_benchmarks/event_spam b/core/tests/vm-benchmark/src/bytecodes/event_spam similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/event_spam rename to core/tests/vm-benchmark/src/bytecodes/event_spam diff --git a/core/tests/vm-benchmark/deployment_benchmarks/finish_eventful_frames b/core/tests/vm-benchmark/src/bytecodes/finish_eventful_frames similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/finish_eventful_frames rename to core/tests/vm-benchmark/src/bytecodes/finish_eventful_frames diff --git a/core/tests/vm-benchmark/deployment_benchmarks/heap_read_write b/core/tests/vm-benchmark/src/bytecodes/heap_read_write similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/heap_read_write rename to core/tests/vm-benchmark/src/bytecodes/heap_read_write diff --git a/core/tests/vm-benchmark/deployment_benchmarks/slot_hash_collision b/core/tests/vm-benchmark/src/bytecodes/slot_hash_collision similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/slot_hash_collision rename to core/tests/vm-benchmark/src/bytecodes/slot_hash_collision diff --git a/core/tests/vm-benchmark/deployment_benchmarks/write_and_decode b/core/tests/vm-benchmark/src/bytecodes/write_and_decode similarity index 100% rename from core/tests/vm-benchmark/deployment_benchmarks/write_and_decode rename to core/tests/vm-benchmark/src/bytecodes/write_and_decode diff --git a/core/tests/vm-benchmark/src/criterion.rs b/core/tests/vm-benchmark/src/criterion.rs new file mode 100644 index 
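The new `iai_results_to_prometheus` binary reads IAI results from stdin and pushes them to a Prometheus pushgateway. A hypothetical invocation (assumes valgrind is installed and a pushgateway is reachable locally):

```sh
export BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL=http://localhost:9091
cargo +nightly bench --bench iai | cargo run --bin iai_results_to_prometheus
```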
000000000000..9515ac4ef988 --- /dev/null +++ b/core/tests/vm-benchmark/src/criterion.rs @@ -0,0 +1,477 @@ +//! Criterion helpers and extensions used to record benchmark timings as Prometheus metrics. + +use std::{ + cell::RefCell, + convert::Infallible, + env, fmt, mem, + rc::Rc, + sync::Once, + thread, + time::{Duration, Instant}, +}; + +use criterion::{ + measurement::{Measurement, ValueFormatter, WallTime}, + Criterion, Throughput, +}; +use once_cell::{sync::OnceCell as SyncOnceCell, unsync::OnceCell}; +use tokio::sync::watch; +use vise::{EncodeLabelSet, Family, Gauge, Metrics, Unit}; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +/// Checks whether a benchmark binary is running in the test mode (as opposed to benchmarking). +pub fn is_test_mode() -> bool { + !env::args().any(|arg| arg == "--bench") +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, EncodeLabelSet)] +struct BenchLabels { + bin: &'static str, + group: String, + benchmark: String, + arg: Option, +} + +// We don't use histograms because benchmark results are uploaded in short bursts, which leads to missing zero values. +#[derive(Debug, Metrics)] +#[metrics(prefix = "vm_benchmark")] +struct VmBenchmarkMetrics { + /// Number of samples for a benchmark. + sample_count: Family>, + + /// Mean latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + mean_timing: Family>, + /// Minimum latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + min_timing: Family>, + /// Maximum latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + max_timing: Family>, + /// Median latency for a benchmark. + #[metrics(unit = Unit::Seconds)] + median_timing: Family>, +} + +#[vise::register] +static METRICS: vise::Global = vise::Global::new(); + +#[derive(Debug)] +struct PrometheusRuntime { + stop_sender: watch::Sender, + _runtime: tokio::runtime::Runtime, +} + +impl Drop for PrometheusRuntime { + fn drop(&mut self) { + self.stop_sender.send_replace(true); + // Metrics are pushed automatically on exit, so we wait *after* sending a stop signal + println!("Waiting for Prometheus metrics to be pushed"); + thread::sleep(Duration::from_secs(1)); + } +} + +impl PrometheusRuntime { + fn new() -> Option { + const PUSH_INTERVAL: Duration = Duration::from_millis(100); + + let gateway_url = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL").ok()?; + let runtime = tokio::runtime::Runtime::new().expect("Failed initializing Tokio runtime"); + println!("Pushing Prometheus metrics to {gateway_url} each {PUSH_INTERVAL:?}"); + let (stop_sender, stop_receiver) = watch::channel(false); + let prometheus_config = PrometheusExporterConfig::push(gateway_url, PUSH_INTERVAL); + runtime.spawn(prometheus_config.run(stop_receiver)); + Some(Self { + stop_sender, + _runtime: runtime, + }) + } +} + +/// Guard returned by [`CurrentBenchmark::set()`] that unsets the current benchmark on drop. 
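`is_test_mode()` works because `cargo bench` passes a `--bench` argument to the benchmark binary, while `cargo test` does not. A self-contained illustration of the same check:

```rust
use std::env;

// Absence of `--bench` signals that the binary runs under `cargo test`,
// so sample sizes and batch sizes can be reduced.
fn is_test_mode() -> bool {
    !env::args().any(|arg| arg == "--bench")
}

fn main() {
    if is_test_mode() {
        println!("test mode: shrink the workload");
    } else {
        println!("bench mode: full workload");
    }
}
```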
+#[must_use = "Will unset the current benchmark when dropped"] +#[derive(Debug)] +struct CurrentBenchmarkGuard; + +impl Drop for CurrentBenchmarkGuard { + fn drop(&mut self) { + CURRENT_BENCH.take(); + } +} + +#[derive(Debug)] +struct CurrentBenchmark { + metrics: &'static VmBenchmarkMetrics, + labels: BenchLabels, + observations: Vec, +} + +impl CurrentBenchmark { + fn set(metrics: &'static VmBenchmarkMetrics, labels: BenchLabels) -> CurrentBenchmarkGuard { + CURRENT_BENCH.replace(Some(Self { + metrics, + labels, + observations: vec![], + })); + CurrentBenchmarkGuard + } + + fn observe(timing: Duration) { + CURRENT_BENCH.with_borrow_mut(|this| { + if let Some(this) = this { + this.observations.push(timing); + } + }); + } +} + +impl Drop for CurrentBenchmark { + fn drop(&mut self) { + let mut observations = mem::take(&mut self.observations); + if observations.is_empty() { + return; + } + + let len = observations.len(); + self.metrics.sample_count[&self.labels].set(len); + let mean = observations + .iter() + .copied() + .sum::() + .div_f32(len as f32); + self.metrics.mean_timing[&self.labels].set(mean); + + // Could use quick median algorithm, but since there aren't that many observations expected, + // sorting looks acceptable. + observations.sort_unstable(); + let (min, max) = (observations[0], *observations.last().unwrap()); + self.metrics.min_timing[&self.labels].set(min); + self.metrics.max_timing[&self.labels].set(max); + let median = if len % 2 == 0 { + (observations[len / 2 - 1] + observations[len / 2]) / 2 + } else { + observations[len / 2] + }; + self.metrics.median_timing[&self.labels].set(median); + + println!("Exported timings: min={min:?}, max={max:?}, mean={mean:?}, median={median:?}"); + } +} + +thread_local! { + static CURRENT_BENCH: RefCell> = const { RefCell::new(None) }; +} + +static BIN_NAME: SyncOnceCell<&'static str> = SyncOnceCell::new(); + +/// Measurement for criterion that exports . +#[derive(Debug)] +pub struct MeteredTime { + _prometheus: Option, +} + +impl MeteredTime { + pub fn new(bin_name: &'static str) -> Self { + static PROMETHEUS_INIT: Once = Once::new(); + + let mut prometheus = None; + if !is_test_mode() { + PROMETHEUS_INIT.call_once(|| { + prometheus = PrometheusRuntime::new(); + }); + } + + if let Err(prev_name) = BIN_NAME.set(bin_name) { + assert_eq!(prev_name, bin_name, "attempted to redefine binary name"); + } + + Self { + _prometheus: prometheus, + } + } +} + +impl Measurement for MeteredTime { + type Intermediate = Infallible; + type Value = Duration; + + fn start(&self) -> Self::Intermediate { + // All measurements must be done via `Bencher::iter()` + unreachable!("must not be invoked directly"); + } + + fn end(&self, _: Self::Intermediate) -> Self::Value { + unreachable!("must not be invoked directly"); + } + + fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value { + *v1 + *v2 + } + + fn zero(&self) -> Self::Value { + Duration::ZERO + } + + fn to_f64(&self, value: &Self::Value) -> f64 { + WallTime.to_f64(value) + } + + fn formatter(&self) -> &dyn ValueFormatter { + WallTime.formatter() + } +} + +/// Drop-in replacement for `criterion::BenchmarkId`. 
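For reference, the aggregation in `CurrentBenchmark::drop` boils down to simple order statistics over the recorded samples. A self-contained sketch using the same mean and median definitions as above:

```rust
use std::time::Duration;

fn main() {
    let mut observations = vec![
        Duration::from_millis(4),
        Duration::from_millis(1),
        Duration::from_millis(3),
        Duration::from_millis(2),
    ];
    let len = observations.len();
    let mean = observations.iter().copied().sum::<Duration>().div_f32(len as f32);
    observations.sort_unstable();
    let (min, max) = (observations[0], *observations.last().unwrap());
    let median = if len % 2 == 0 {
        (observations[len / 2 - 1] + observations[len / 2]) / 2
    } else {
        observations[len / 2]
    };
    // For 1ms, 2ms, 3ms, 4ms: mean = 2.5ms, median = 2.5ms, min = 1ms, max = 4ms.
    println!("mean={mean:?}, median={median:?}, min={min:?}, max={max:?}");
}
```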
+pub struct BenchmarkId { + inner: criterion::BenchmarkId, + benchmark: String, + arg: String, +} + +impl BenchmarkId { + pub fn new, P: fmt::Display>(function_name: S, parameter: P) -> Self { + let function_name = function_name.into(); + Self { + benchmark: function_name.clone(), + arg: parameter.to_string(), + inner: criterion::BenchmarkId::new(function_name, parameter), + } + } +} + +/// Drop-in replacement for `criterion::BenchmarkGroup`. +pub struct BenchmarkGroup<'a> { + name: String, + inner: criterion::BenchmarkGroup<'a, MeteredTime>, + metrics: &'static VmBenchmarkMetrics, +} + +impl BenchmarkGroup<'_> { + pub fn sample_size(&mut self, size: usize) -> &mut Self { + self.inner.sample_size(size); + self + } + + pub fn throughput(&mut self, throughput: Throughput) -> &mut Self { + self.inner.throughput(throughput); + self + } + + pub fn measurement_time(&mut self, dur: Duration) -> &mut Self { + self.inner.measurement_time(dur); + self + } + + fn start_bench(&self, benchmark: String, arg: Option) -> CurrentBenchmarkGuard { + let labels = BenchLabels { + bin: BIN_NAME.get().copied().unwrap_or(""), + group: self.name.clone(), + benchmark, + arg, + }; + CurrentBenchmark::set(self.metrics, labels) + } + + pub fn bench_metered(&mut self, id: impl Into, mut bench_fn: F) + where + F: FnMut(&mut Bencher<'_, '_>), + { + let id = id.into(); + let _guard = self.start_bench(id.clone(), None); + self.inner + .bench_function(id, |bencher| bench_fn(&mut Bencher { inner: bencher })); + } + + pub fn bench_metered_with_input(&mut self, id: BenchmarkId, input: &I, mut bench_fn: F) + where + I: ?Sized, + F: FnMut(&mut Bencher<'_, '_>, &I), + { + let _guard = self.start_bench(id.benchmark, Some(id.arg)); + self.inner + .bench_with_input(id.inner, input, |bencher, input| { + bench_fn(&mut Bencher { inner: bencher }, input) + }); + } +} + +pub struct Bencher<'a, 'r> { + inner: &'r mut criterion::Bencher<'a, MeteredTime>, +} + +impl Bencher<'_, '_> { + pub fn iter(&mut self, mut routine: impl FnMut(BenchmarkTimer)) { + self.inner.iter_custom(move |iters| { + let mut total = Duration::ZERO; + for _ in 0..iters { + let timer = BenchmarkTimer::new(); + let observation = timer.observation.clone(); + routine(timer); + let timing = observation.get().copied().unwrap_or_default(); + CurrentBenchmark::observe(timing); + total += timing; + } + total + }) + } +} + +/// Timer for benchmarks supplied to the `Bencher::iter()` closure. +#[derive(Debug)] +#[must_use = "should be started to start measurements"] +pub struct BenchmarkTimer { + observation: Rc>, +} + +impl BenchmarkTimer { + fn new() -> Self { + Self { + observation: Rc::default(), + } + } + + /// Starts the timer. The timer will remain active until the returned guard is dropped. If you drop the timer implicitly, + /// be careful with the drop order (inverse to the variable declaration order); when in doubt, drop the guard explicitly. + pub fn start(self) -> BenchmarkTimerGuard { + BenchmarkTimerGuard { + started_at: Instant::now(), + observation: self.observation, + } + } +} + +/// Guard returned from [`BenchmarkTimer::start()`]. 
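The timer above is a guard pattern: measurement starts when the guard is created and the elapsed time is recorded when it is dropped, which keeps setup code out of the measured region. A self-contained illustration (the names here are hypothetical, not part of the crate):

```rust
use std::time::{Duration, Instant};

// Stand-in for `BenchmarkTimer`/`BenchmarkTimerGuard`: the guard adds the
// elapsed time to an accumulator when dropped.
struct TimerGuard<'a> {
    started_at: Instant,
    total: &'a mut Duration,
}

impl Drop for TimerGuard<'_> {
    fn drop(&mut self) {
        *self.total += self.started_at.elapsed();
    }
}

fn main() {
    let mut measured = Duration::ZERO;
    let setup: Vec<u64> = (0..1_000_000).collect(); // setup, not measured
    {
        let _guard = TimerGuard {
            started_at: Instant::now(),
            total: &mut measured,
        };
        let sum: u64 = setup.iter().sum(); // measured region
        assert!(sum > 0);
    }
    println!("measured: {measured:?}");
}
```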
+#[derive(Debug)] +#[must_use = "will stop the timer on drop"] +pub struct BenchmarkTimerGuard { + started_at: Instant, + observation: Rc>, +} + +impl Drop for BenchmarkTimerGuard { + fn drop(&mut self) { + let latency = self.started_at.elapsed(); + self.observation.set(latency).ok(); + } +} + +pub trait CriterionExt { + fn metered_group(&mut self, name: impl Into) -> BenchmarkGroup<'_>; +} + +impl CriterionExt for Criterion { + fn metered_group(&mut self, name: impl Into) -> BenchmarkGroup<'_> { + let name = name.into(); + BenchmarkGroup { + inner: self.benchmark_group(name.clone()), + name, + metrics: &METRICS, + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use super::*; + use crate::BYTECODES; + + fn test_benchmark(c: &mut Criterion, metrics: &'static VmBenchmarkMetrics) { + let mut group = c.metered_group("single"); + group.metrics = metrics; + for bytecode in BYTECODES { + group.bench_metered(bytecode.name, |bencher| { + bencher.iter(|timer| { + let _guard = timer.start(); + thread::sleep(Duration::from_millis(1)) + }) + }); + } + drop(group); + + let mut group = c.metered_group("with_arg"); + group.metrics = metrics; + for bytecode in BYTECODES { + for arg in [1, 10, 100] { + group.bench_metered_with_input( + BenchmarkId::new(bytecode.name, arg), + &arg, + |bencher, _arg| { + bencher.iter(|timer| { + let _guard = timer.start(); + thread::sleep(Duration::from_millis(1)) + }); + }, + ) + } + } + } + + #[test] + fn recording_benchmarks() { + let metered_time = MeteredTime::new("test"); + let metrics = &*Box::leak(Box::::default()); + + let mut criterion = Criterion::default() + .warm_up_time(Duration::from_millis(10)) + .measurement_time(Duration::from_millis(10)) + .sample_size(10) + .with_measurement(metered_time); + test_benchmark(&mut criterion, metrics); + + let timing_labels: HashSet<_> = metrics.mean_timing.to_entries().into_keys().collect(); + // Check that labels are as expected. 
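Putting these pieces together, a bench target built on the extensions looks roughly as follows (a sketch assuming the `vm_benchmark::criterion` API from this diff):

```rust
use criterion::{criterion_group, criterion_main, Criterion};
use vm_benchmark::criterion::{CriterionExt, MeteredTime};

fn my_bench(c: &mut Criterion<MeteredTime>) {
    let mut group = c.metered_group("example");
    group.bench_metered("noop", |bencher| {
        bencher.iter(|timer| {
            let _guard = timer.start();
            // Measured work goes here.
        });
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default()
        .configure_from_args()
        .with_measurement(MeteredTime::new("example"));
    targets = my_bench
);
criterion_main!(benches);
```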
+ for bytecode in BYTECODES { + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "single".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: None, + })); + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "with_arg".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: Some("1".to_owned()), + })); + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "with_arg".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: Some("10".to_owned()), + })); + assert!(timing_labels.contains(&BenchLabels { + bin: "test", + group: "with_arg".to_owned(), + benchmark: bytecode.name.to_owned(), + arg: Some("100".to_owned()), + })); + } + assert_eq!( + timing_labels.len(), + 4 * BYTECODES.len(), + "{timing_labels:#?}" + ); + + // Sanity-check relations among collected metrics + for label in &timing_labels { + let mean = metrics.mean_timing[label].get(); + let min = metrics.min_timing[label].get(); + let max = metrics.max_timing[label].get(); + let median = metrics.median_timing[label].get(); + assert!( + min > Duration::ZERO, + "min={min:?}, mean={mean:?}, median = {median:?}, max={max:?}" + ); + assert!( + min <= mean && min <= median, + "min={min:?}, mean={mean:?}, median = {median:?}, max={max:?}" + ); + assert!( + mean <= max && median <= max, + "min={min:?}, mean={mean:?}, median = {median:?}, max={max:?}" + ); + } + } +} diff --git a/core/tests/vm-benchmark/src/find_slowest.rs b/core/tests/vm-benchmark/src/find_slowest.rs deleted file mode 100644 index 97a6acd5acd9..000000000000 --- a/core/tests/vm-benchmark/src/find_slowest.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::{ - io::Write, - time::{Duration, Instant}, -}; - -use zksync_vm_benchmark_harness::*; - -fn main() { - let mut results = vec![]; - - let arg = std::env::args() - .nth(1) - .expect("Expected directory of contracts to rank as first argument."); - let files = std::fs::read_dir(arg).expect("Failed to list dir"); - - let mut last_progress_update = Instant::now(); - - for (i, file) in files.enumerate() { - let path = file.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - if let Some(code) = cut_to_allowed_bytecode_size(&test_contract) { - let tx = get_deploy_tx(code); - - let start_time = Instant::now(); - BenchmarkingVm::new().run_transaction(&tx); - results.push((start_time.elapsed(), path)); - } - - if last_progress_update.elapsed() > Duration::from_millis(100) { - print!("\r{}", i); - std::io::stdout().flush().unwrap(); - last_progress_update = Instant::now(); - } - } - println!(); - - results.sort(); - for (time, path) in results.iter().rev().take(30) { - println!("{} took {:?}", path.display(), time); - } -} diff --git a/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs deleted file mode 100644 index d419603bae87..000000000000 --- a/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs +++ /dev/null @@ -1,37 +0,0 @@ -use std::io::BufReader; - -use vise::{Gauge, LabeledFamily, Metrics}; -use vm_benchmark::parse_iai::IaiResult; - -fn main() { - let results: Vec = - vm_benchmark::parse_iai::parse_iai(BufReader::new(std::io::stdin())).collect(); - - vm_benchmark::with_prometheus::with_prometheus(|| { - for r in results { - VM_CACHEGRIND_METRICS.instructions[&r.name.clone()].set(r.instructions as f64); - VM_CACHEGRIND_METRICS.l1_accesses[&r.name.clone()].set(r.l1_accesses as f64); - VM_CACHEGRIND_METRICS.l2_accesses[&r.name.clone()].set(r.l2_accesses as 
f64); - VM_CACHEGRIND_METRICS.ram_accesses[&r.name.clone()].set(r.ram_accesses as f64); - VM_CACHEGRIND_METRICS.cycles[&r.name.clone()].set(r.cycles as f64); - } - }) -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_cachegrind")] -pub(crate) struct VmCachegrindMetrics { - #[metrics(labels = ["benchmark"])] - pub instructions: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l1_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l2_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub ram_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub cycles: LabeledFamily>, -} - -#[vise::register] -pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); diff --git a/core/tests/vm-benchmark/harness/src/instruction_counter.rs b/core/tests/vm-benchmark/src/instruction_counter.rs similarity index 100% rename from core/tests/vm-benchmark/harness/src/instruction_counter.rs rename to core/tests/vm-benchmark/src/instruction_counter.rs diff --git a/core/tests/vm-benchmark/src/instruction_counts.rs b/core/tests/vm-benchmark/src/instruction_counts.rs deleted file mode 100644 index c038c8f2bf6b..000000000000 --- a/core/tests/vm-benchmark/src/instruction_counts.rs +++ /dev/null @@ -1,28 +0,0 @@ -//! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. - -use std::path::Path; - -use zksync_vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; - -fn main() { - // using source file location because this is just a script, the binary isn't meant to be reused - let benchmark_folder = Path::new(file!()) - .parent() - .unwrap() - .parent() - .unwrap() - .join("deployment_benchmarks"); - - for path in std::fs::read_dir(benchmark_folder).unwrap() { - let path = path.unwrap().path(); - - let test_contract = std::fs::read(&path).expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - - let name = path.file_name().unwrap().to_str().unwrap(); - - println!("{} {}", name, BenchmarkingVm::new().instruction_count(&tx)); - } -} diff --git a/core/tests/vm-benchmark/src/lib.rs b/core/tests/vm-benchmark/src/lib.rs index 38cc311105b3..4bd008d33196 100644 --- a/core/tests/vm-benchmark/src/lib.rs +++ b/core/tests/vm-benchmark/src/lib.rs @@ -1,2 +1,72 @@ -pub mod parse_iai; -pub mod with_prometheus; +use zksync_types::Transaction; + +pub use crate::{ + transaction::{ + get_deploy_tx, get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, + get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, + LoadTestParams, + }, + vm::{BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, VmLabel}, +}; + +pub mod criterion; +mod instruction_counter; +mod transaction; +mod vm; + +#[derive(Debug, Clone, Copy)] +pub struct Bytecode { + pub name: &'static str, + raw_bytecode: &'static [u8], +} + +impl Bytecode { + pub fn get(name: &str) -> Self { + BYTECODES + .iter() + .find(|bytecode| bytecode.name == name) + .copied() + .unwrap_or_else(|| panic!("bytecode `{name}` is not defined")) + } + + /// Bytecodes must consist of an odd number of 32 byte words. + /// This function "fixes" bytecodes of wrong length by cutting off their end. 
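In other words, a bytecode with an even number of 32-byte words is trimmed by one word. A self-contained sketch of the same rule (the free function here is hypothetical; the diff keeps it as a private helper on `Bytecode`):

```rust
// Trims a raw bytecode down to an odd number of 32-byte words.
fn cut_to_allowed_size(bytes: &[u8]) -> &[u8] {
    let mut words = bytes.len() / 32;
    assert!(words > 0, "bytecode is empty");
    if words % 2 == 0 {
        words -= 1;
    }
    &bytes[..32 * words]
}

fn main() {
    let raw = vec![0_u8; 32 * 4 + 7]; // 4 full words plus 7 trailing bytes
    let cut = cut_to_allowed_size(&raw);
    assert_eq!(cut.len(), 32 * 3); // trimmed to an odd word count
    println!("{} -> {} bytes", raw.len(), cut.len());
}
```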
+ fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> &[u8] { + let mut words = bytes.len() / 32; + assert!(words > 0, "bytecode is empty"); + + if words & 1 == 0 { + words -= 1; + } + &bytes[..32 * words] + } + + pub fn bytecode(&self) -> &'static [u8] { + Self::cut_to_allowed_bytecode_size(self.raw_bytecode) + } + + pub fn deploy_tx(&self) -> Transaction { + get_deploy_tx(self.bytecode()) + } +} + +macro_rules! include_bytecode { + ($name:ident) => { + Bytecode { + name: stringify!($name), + raw_bytecode: include_bytes!(concat!("bytecodes/", stringify!($name))), + } + }; +} + +pub const BYTECODES: &[Bytecode] = &[ + include_bytecode!(access_memory), + include_bytecode!(call_far), + include_bytecode!(decode_shl_sub), + include_bytecode!(deploy_simple_contract), + include_bytecode!(event_spam), + include_bytecode!(finish_eventful_frames), + include_bytecode!(heap_read_write), + include_bytecode!(slot_hash_collision), + include_bytecode!(write_and_decode), +]; diff --git a/core/tests/vm-benchmark/src/main.rs b/core/tests/vm-benchmark/src/main.rs index 925ec78ceb3c..6e2b397d746d 100644 --- a/core/tests/vm-benchmark/src/main.rs +++ b/core/tests/vm-benchmark/src/main.rs @@ -1,16 +1,10 @@ -use zksync_vm_benchmark_harness::*; +use vm_benchmark::{BenchmarkingVm, Bytecode}; fn main() { - let test_contract = std::fs::read( - std::env::args() - .nth(1) - .expect("please provide an input file"), - ) - .expect("failed to read file"); - - let code = cut_to_allowed_bytecode_size(&test_contract).unwrap(); - let tx = get_deploy_tx(code); - + let bytecode_name = std::env::args() + .nth(1) + .expect("please provide bytecode name, e.g. 'access_memory'"); + let tx = Bytecode::get(&bytecode_name).deploy_tx(); for _ in 0..100 { let mut vm = BenchmarkingVm::new(); vm.run_transaction(&tx); diff --git a/core/tests/vm-benchmark/src/transaction.rs b/core/tests/vm-benchmark/src/transaction.rs new file mode 100644 index 000000000000..90e1c6360b81 --- /dev/null +++ b/core/tests/vm-benchmark/src/transaction.rs @@ -0,0 +1,194 @@ +use once_cell::sync::Lazy; +pub use zksync_contracts::test_contracts::LoadnextContractExecutionParams as LoadTestParams; +use zksync_contracts::{deployer_contract, TestContract}; +use zksync_multivm::utils::get_max_gas_per_pubdata_byte; +use zksync_types::{ + ethabi::{encode, Token}, + fee::Fee, + l2::L2Tx, + utils::deployed_address_create, + Address, K256PrivateKey, L2ChainId, Nonce, ProtocolVersionId, Transaction, + CONTRACT_DEPLOYER_ADDRESS, H256, U256, +}; +use zksync_utils::bytecode::hash_bytecode; + +const LOAD_TEST_MAX_READS: usize = 100; + +pub(crate) static PRIVATE_KEY: Lazy = + Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); +static LOAD_TEST_CONTRACT_ADDRESS: Lazy
<Address>
= + Lazy::new(|| deployed_address_create(PRIVATE_KEY.address(), 0.into())); + +static LOAD_TEST_CONTRACT: Lazy = Lazy::new(zksync_contracts::get_loadnext_contract); + +static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { + deployer_contract() + .function("create") + .unwrap() + .short_signature() +}); + +pub fn get_deploy_tx(code: &[u8]) -> Transaction { + get_deploy_tx_with_gas_limit(code, 30_000_000, 0) +} + +pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> Transaction { + let mut salt = vec![0_u8; 32]; + salt[28..32].copy_from_slice(&nonce.to_be_bytes()); + let params = [ + Token::FixedBytes(salt), + Token::FixedBytes(hash_bytecode(code).0.to_vec()), + Token::Bytes([].to_vec()), + ]; + let calldata = CREATE_FUNCTION_SIGNATURE + .iter() + .cloned() + .chain(encode(¶ms)) + .collect(); + + let mut signed = L2Tx::new_signed( + CONTRACT_DEPLOYER_ADDRESS, + calldata, + Nonce(nonce), + tx_fee(gas_limit), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + vec![code.to_vec()], // maybe not needed? + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +fn tx_fee(gas_limit: u32) -> Fee { + Fee { + gas_limit: U256::from(gas_limit), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( + ProtocolVersionId::latest().into(), + )), + } +} + +pub fn get_transfer_tx(nonce: u32) -> Transaction { + let mut signed = L2Tx::new_signed( + PRIVATE_KEY.address(), + vec![], // calldata + Nonce(nonce), + tx_fee(1_000_000), + 1_000_000_000.into(), // value + L2ChainId::from(270), + &PRIVATE_KEY, + vec![], // factory deps + Default::default(), // paymaster params + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_load_test_deploy_tx() -> Transaction { + let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())]; + let params = [ + Token::FixedBytes(vec![0_u8; 32]), + Token::FixedBytes(hash_bytecode(&LOAD_TEST_CONTRACT.bytecode).0.to_vec()), + Token::Bytes(encode(&calldata)), + ]; + let create_calldata = CREATE_FUNCTION_SIGNATURE + .iter() + .cloned() + .chain(encode(¶ms)) + .collect(); + + let mut factory_deps = LOAD_TEST_CONTRACT.factory_deps.clone(); + factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); + + let mut signed = L2Tx::new_signed( + CONTRACT_DEPLOYER_ADDRESS, + create_calldata, + Nonce(0), + tx_fee(100_000_000), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + factory_deps, + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> Transaction { + assert!( + params.reads <= LOAD_TEST_MAX_READS, + "Too many reads: {params:?}, should be <={LOAD_TEST_MAX_READS}" + ); + + let execute_function = LOAD_TEST_CONTRACT + .contract + .function("execute") + .expect("no `execute` function in load test contract"); + let calldata = execute_function + .encode_input(&vec![ + Token::Uint(U256::from(params.reads)), + Token::Uint(U256::from(params.writes)), + Token::Uint(U256::from(params.hashes)), + Token::Uint(U256::from(params.events)), + Token::Uint(U256::from(params.recursive_calls)), + Token::Uint(U256::from(params.deploys)), + 
]) + .expect("cannot encode `execute` inputs"); + + let mut signed = L2Tx::new_signed( + *LOAD_TEST_CONTRACT_ADDRESS, + calldata, + Nonce(nonce), + tx_fee(gas_limit), + U256::zero(), + L2ChainId::from(270), + &PRIVATE_KEY, + LOAD_TEST_CONTRACT.factory_deps.clone(), + Default::default(), + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_realistic_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 30, + writes: 2, + events: 5, + hashes: 10, + recursive_calls: 0, + deploys: 0, + }, + ) +} + +pub fn get_heavy_load_test_tx(nonce: u32) -> Transaction { + get_load_test_tx( + nonce, + 10_000_000, + LoadTestParams { + reads: 100, + writes: 5, + events: 20, + hashes: 100, + recursive_calls: 20, + deploys: 5, + }, + ) +} diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/src/vm.rs similarity index 54% rename from core/tests/vm-benchmark/harness/src/lib.rs rename to core/tests/vm-benchmark/src/vm.rs index 6460d25a8e8d..f3c00667c7dd 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -1,51 +1,28 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; -pub use zksync_contracts::test_contracts::LoadnextContractExecutionParams as LoadTestParams; -use zksync_contracts::{deployer_contract, BaseSystemContracts, TestContract}; +use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView}, ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, + VmInterfaceHistoryEnabled, }, - utils::get_max_gas_per_pubdata_byte, vm_fast, vm_latest, vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled}, + zk_evm_latest::ethereum_types::{Address, U256}, }; use zksync_types::{ - block::L2BlockHasher, - ethabi::{encode, Token}, - fee::Fee, - fee_model::BatchFeeInput, - helpers::unix_timestamp_ms, - l2::L2Tx, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, K256PrivateKey, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256, + block::L2BlockHasher, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, + utils::storage_key_for_eth_balance, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, + Transaction, }; use zksync_utils::bytecode::hash_bytecode; -mod instruction_counter; +use crate::transaction::PRIVATE_KEY; -/// Bytecodes have consist of an odd number of 32 byte words -/// This function "fixes" bytecodes of wrong length by cutting off their end. -pub fn cut_to_allowed_bytecode_size(bytes: &[u8]) -> Option<&[u8]> { - let mut words = bytes.len() / 32; - if words == 0 { - return None; - } - - if words & 1 == 0 { - words -= 1; - } - Some(&bytes[..32 * words]) -} - -const LOAD_TEST_MAX_READS: usize = 100; - -static LOAD_TEST_CONTRACT_ADDRESS: Lazy
= - Lazy::new(|| deployed_address_create(PRIVATE_KEY.address(), 0.into())); +static SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = Lazy::new(BaseSystemContracts::load_from_disk); static STORAGE: Lazy<InMemoryStorage> = Lazy::new(|| { let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); @@ -56,20 +33,6 @@ static STORAGE: Lazy<InMemoryStorage> = Lazy::new(|| { storage }); -static SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = Lazy::new(BaseSystemContracts::load_from_disk); - -static LOAD_TEST_CONTRACT: Lazy<TestContract> = Lazy::new(zksync_contracts::get_loadnext_contract); - -static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { - deployer_contract() - .function("create") - .unwrap() - .short_signature() -}); - -static PRIVATE_KEY: Lazy<K256PrivateKey> = - Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); - /// VM label used to name `criterion` benchmarks. #[derive(Debug, Clone, Copy)] pub enum VmLabel { @@ -229,178 +192,17 @@ impl BenchmarkingVm { } } -pub fn get_deploy_tx(code: &[u8]) -> Transaction { - get_deploy_tx_with_gas_limit(code, 30_000_000, 0) -} - -pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> Transaction { - let mut salt = vec![0_u8; 32]; - salt[28..32].copy_from_slice(&nonce.to_be_bytes()); - let params = [ - Token::FixedBytes(salt), - Token::FixedBytes(hash_bytecode(code).0.to_vec()), - Token::Bytes([].to_vec()), - ]; - let calldata = CREATE_FUNCTION_SIGNATURE - .iter() - .cloned() - .chain(encode(&params)) - .collect(); - - let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, - calldata, - Nonce(nonce), - tx_fee(gas_limit), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - vec![code.to_vec()], // maybe not needed? - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -fn tx_fee(gas_limit: u32) -> Fee { - Fee { - gas_limit: U256::from(gas_limit), - max_fee_per_gas: U256::from(250_000_000), - max_priority_fee_per_gas: U256::from(0), - gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( - ProtocolVersionId::latest().into(), - )), - } -} - -pub fn get_transfer_tx(nonce: u32) -> Transaction { - let mut signed = L2Tx::new_signed( - PRIVATE_KEY.address(), - vec![], // calldata - Nonce(nonce), - tx_fee(1_000_000), - 1_000_000_000.into(), // value - L2ChainId::from(270), - &PRIVATE_KEY, - vec![], // factory deps - Default::default(), // paymaster params - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -pub fn get_load_test_deploy_tx() -> Transaction { - let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())]; - let params = [ - Token::FixedBytes(vec![0_u8; 32]), - Token::FixedBytes(hash_bytecode(&LOAD_TEST_CONTRACT.bytecode).0.to_vec()), - Token::Bytes(encode(&calldata)), - ]; - let create_calldata = CREATE_FUNCTION_SIGNATURE - .iter() - .cloned() - .chain(encode(&params)) - .collect(); - - let mut factory_deps = LOAD_TEST_CONTRACT.factory_deps.clone(); - factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); - - let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, - create_calldata, - Nonce(0), - tx_fee(100_000_000), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - factory_deps, - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: 
LoadTestParams) -> Transaction { - assert!( - params.reads <= LOAD_TEST_MAX_READS, - "Too many reads: {params:?}, should be <={LOAD_TEST_MAX_READS}" - ); - - let execute_function = LOAD_TEST_CONTRACT - .contract - .function("execute") - .expect("no `execute` function in load test contract"); - let calldata = execute_function - .encode_input(&vec![ - Token::Uint(U256::from(params.reads)), - Token::Uint(U256::from(params.writes)), - Token::Uint(U256::from(params.hashes)), - Token::Uint(U256::from(params.events)), - Token::Uint(U256::from(params.recursive_calls)), - Token::Uint(U256::from(params.deploys)), - ]) - .expect("cannot encode `execute` inputs"); - - let mut signed = L2Tx::new_signed( - *LOAD_TEST_CONTRACT_ADDRESS, - calldata, - Nonce(nonce), - tx_fee(gas_limit), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - LOAD_TEST_CONTRACT.factory_deps.clone(), - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() -} - -pub fn get_realistic_load_test_tx(nonce: u32) -> Transaction { - get_load_test_tx( - nonce, - 10_000_000, - LoadTestParams { - reads: 30, - writes: 2, - events: 5, - hashes: 10, - recursive_calls: 0, - deploys: 0, - }, - ) -} - -pub fn get_heavy_load_test_tx(nonce: u32) -> Transaction { - get_load_test_tx( - nonce, - 10_000_000, - LoadTestParams { - reads: 100, - writes: 5, - events: 20, - hashes: 100, - recursive_calls: 20, - deploys: 5, - }, - ) -} - #[cfg(test)] mod tests { use assert_matches::assert_matches; use zksync_contracts::read_bytecode; use zksync_multivm::interface::ExecutionResult; - use crate::*; + use super::*; + use crate::{ + get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, + get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, + }; #[test] fn can_deploy_contract() { diff --git a/core/tests/vm-benchmark/src/with_prometheus.rs b/core/tests/vm-benchmark/src/with_prometheus.rs deleted file mode 100644 index f9b79adedc09..000000000000 --- a/core/tests/vm-benchmark/src/with_prometheus.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::time::Duration; - -use tokio::sync::watch; -use zksync_vlog::prometheus::PrometheusExporterConfig; - -pub fn with_prometheus<F: FnOnce()>(f: F) { - tokio::runtime::Runtime::new() - .unwrap() - .block_on(with_prometheus_async(f)); -} - -async fn with_prometheus_async<F: FnOnce()>(f: F) { - println!("Pushing results to Prometheus"); - - let endpoint = - "http://vmagent.stage.matterlabs.corp/api/v1/import/prometheus/metrics/job/vm-benchmark"; - let (stop_sender, stop_receiver) = watch::channel(false); - let prometheus_config = - PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); - tokio::spawn(prometheus_config.run(stop_receiver)); - - f(); - - println!("Waiting for push to happen..."); - tokio::time::sleep(Duration::from_secs(1)).await; - stop_sender.send_replace(true); -} diff --git a/deny.toml b/deny.toml index 1e4a30ad6231..c2775fc057c8 100644 --- a/deny.toml +++ b/deny.toml @@ -1,15 +1,22 @@ +[graph] +targets = [] all-features = false no-default-features = false +[output] +feature-depth = 1 + [advisories] -vulnerability = "deny" -unmaintained = "warn" -yanked = "warn" -notice = "warn" -ignore = [] +ignore = [ + "RUSTSEC-2024-0320", # the yaml_rust dependency is unmaintained; it is used in core, and we should consider moving to the yaml_rust2 fork + "RUSTSEC-2020-0168", # the mach dependency is unmaintained; it is used in consensus, and we should consider moving to the mach2 fork + 
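The load-test helpers above expose a small surface: `get_load_test_tx` takes a nonce, a gas limit, and a `LoadTestParams` whose six fields map one-to-one onto the loadnext contract's `execute` arguments, with `reads` capped at `LOAD_TEST_MAX_READS` (100) by the assert. As a sketch of how a custom preset between the "realistic" and "heavy" ones could be defined (hypothetical code, not part of this PR; it assumes it lives in the same module as the helpers so that `get_load_test_tx` and `LoadTestParams` are in scope):

```rust
/// Hypothetical mid-weight preset sitting between `get_realistic_load_test_tx`
/// and `get_heavy_load_test_tx`.
pub fn get_medium_load_test_tx(nonce: u32) -> zksync_types::Transaction {
    get_load_test_tx(
        nonce,
        10_000_000, // same gas limit as the realistic/heavy presets above
        LoadTestParams {
            reads: 60, // must stay <= LOAD_TEST_MAX_READS (100), or the assert panics
            writes: 3,
            events: 10,
            hashes: 50,
            recursive_calls: 5,
            deploys: 1,
        },
    )
}
```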
"RUSTSEC-2024-0370", # `cs_derive` needs to be updated to not rely on `proc-macro-error` + # all below caused by StructOpt which we still use and we should move to clap v3 instead + "RUSTSEC-2021-0145", + "RUSTSEC-2021-0139", +] [licenses] -unlicensed = "deny" allow = [ "MIT", "Apache-2.0", @@ -22,35 +29,45 @@ allow = [ "BSD-3-Clause", "Zlib", "OpenSSL", + "Apache-2.0 WITH LLVM-exception", + "0BSD", ] -copyleft = "warn" -allow-osi-fsf-free = "neither" -default = "deny" confidence-threshold = 0.8 -exceptions = [ - { name = "ring", allow = ["OpenSSL"] }, -] -unused-allowed-license = "allow" [[licenses.clarify]] -name = "ring" -expression = "OpenSSL" -license-files = [ - { path = "LICENSE", hash = 0xbd0eed23 }, -] +crate = "ring" +# SPDX considers OpenSSL to encompass both the OpenSSL and SSLeay licenses +# https://spdx.org/licenses/OpenSSL.html +# ISC - Both BoringSSL and ring use this for their new files +# MIT - "Files in third_party/ have their own licenses, as described therein. The MIT +# license, for third_party/fiat, which, unlike other third_party directories, is +# compiled into non-test libraries, is included below." +# OpenSSL - Obviously +expression = "ISC AND MIT AND OpenSSL" +license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] [licenses.private] ignore = false +registries = [] [bans] -multiple-versions = "warn" +multiple-versions = "allow" wildcards = "allow" highlight = "all" workspace-default-features = "allow" external-default-features = "allow" +allow = [] +deny = [] +skip = [] +skip-tree = [] [sources] unknown-registry = "deny" unknown-git = "allow" allow-registry = ["https://github.com/rust-lang/crates.io-index"] allow-git = [] + +[sources.allow-org] +github = [] +gitlab = [] +bitbucket = [] diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml index 08d01390d770..e0f751130eb0 100644 --- a/docker-compose-cpu-runner.yml +++ b/docker-compose-cpu-runner.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" volumes: - type: bind source: ./volumes/reth/data @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index 92a7b0b00887..f2089446a41d 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" volumes: - type: bind source: ./volumes/reth/data @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index bbd61715842d..35c6c3778f22 100644 --- 
a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" volumes: - type: bind source: ./volumes/reth/data @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose.yml b/docker-compose.yml index 68feb0769c23..7e1b52f83347 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,9 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" + ports: + - 127.0.0.1:8545:8545 volumes: - type: bind source: ./volumes/reth/data @@ -11,13 +13,12 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config - ports: - - 127.0.0.1:8545:8545 + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + postgres: image: "postgres:14" - command: postgres -c 'max_connections=200' + command: postgres -c 'max_connections=1000' ports: - 127.0.0.1:5432:5432 volumes: @@ -54,3 +55,4 @@ services: - "host:host-gateway" profiles: - runner + network_mode: host diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 6f7df349d66f..7ed1906b8574 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -34,7 +34,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 2); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 3); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ @@ -55,7 +55,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 3); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index 02ca4a3b77b0..45f2ffa51b04 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -37,7 +37,7 @@ FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for proof wrapping -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys COPY setup_2\^24.key /setup_2\^24.key diff --git 
a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index c53f27818687..de59451fee8f 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -11,7 +11,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for proof wrapping -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_fri_gateway /usr/bin/ diff --git a/docker/prover-gpu-fri-gar/Dockerfile b/docker/prover-gpu-fri-gar/Dockerfile index 248f6aaf35fe..06a1ff532b57 100644 --- a/docker/prover-gpu-fri-gar/Dockerfile +++ b/docker/prover-gpu-fri-gar/Dockerfile @@ -9,7 +9,7 @@ COPY *.bin / RUN apt-get update && apt-get install -y libpq5 ca-certificates openssl && rm -rf /var/lib/apt/lists/* # copy finalization hints required for assembly generation -COPY --from=prover prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY --from=prover prover/data/keys/ /prover/data/keys/ COPY --from=prover /usr/bin/zksync_prover_fri /usr/bin/ ENTRYPOINT ["zksync_prover_fri"] diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index 1f1aaa447f22..ad3ff1ff7197 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -31,7 +31,7 @@ FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for assembly generation -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_fri /usr/bin/ diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 4f7c00aa2ef9..2eebe07515e4 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -13,7 +13,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_witness_generator /usr/bin/ diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index d1bc1e29c5fa..e315f670101a 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -12,7 +12,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for witness vector generation -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_witness_vector_generator /usr/bin/ diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 375384bf7fca..53e532653111 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -25,10 
+25,34 @@ RUN git submodule update --init --recursive # Build Solidity WORKDIR /solidity/build -RUN cmake .. -RUN make +# The default compilation is Release with Debug symbols, which is quite large. +RUN cmake .. -DCMAKE_BUILD_TYPE="Release" +RUN make -j -FROM debian:bookworm as rust-lightweight +# Rust binaries - with a separate builder. +FROM rust:slim-bookworm as rust-builder + +ARG ARCH=amd64 +RUN apt-get update && apt-get install -y \ + libssl-dev \ + pkg-config \ + libclang-15-dev \ + g++ \ + cmake \ + git + +RUN cargo install --version=0.8.0 sqlx-cli +RUN cargo install cargo-nextest +RUN cargo install cargo-spellcheck +RUN cargo install sccache + +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + +# Main builder. +FROM debian:bookworm as rust-lightweight-base ARG ARCH=amd64 @@ -69,7 +93,7 @@ RUN apt-get update && \ lldb-15 \ lld-15 \ liburing-dev \ - libclang-dev + libclang-15-dev # Install Docker RUN apt-get update && \ @@ -97,27 +121,28 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH # Install gloud for GCR/GAR login +# Google was super lazy, and their package is around 1 GB. +# So we trim it a little bit based on info from `https://github.com/GoogleCloudPlatform/gsutil/issues/1732` ENV GCLOUD_VERSION=451.0.1 RUN echo "deb [arch=${ARCH}] http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \ wget -c -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ apt-get update -y && apt-get install google-cloud-cli=${GCLOUD_VERSION}-0 --no-install-recommends -y && \ gcloud config set core/disable_usage_reporting true && \ gcloud config set component_manager/disable_update_check true && \ - gcloud config set metrics/environment github_docker_image - -RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y && \ - rustup default stable - -RUN cargo install --version=0.8.0 sqlx-cli -RUN cargo install cargo-nextest - -# Installing foundry-zksync from git is failing, we will build it from sources -# Install foundry -RUN git clone https://github.com/matter-labs/foundry-zksync -RUN cd foundry-zksync && cargo build --release --bins -RUN mv ./foundry-zksync/target/release/forge /usr/local/bin/ -RUN mv ./foundry-zksync/target/release/cast /usr/local/bin/ - + gcloud config set metrics/environment github_docker_image && \ + rm -rf $(find /usr/lib/google-cloud-sdk/ -regex ".*/__pycache__") && \ + rm -rf /usr/lib/google-cloud-sdk/bin/anthoscli && \ + rm -rf /usr/lib/google-cloud-sdk/platform/bundledpythonunix && \ + rm -rf /usr/lib/google-cloud-sdk/data/gcloud.json + +COPY --from=rust-builder /usr/local/cargo/bin/sqlx \ + /usr/local/cargo/bin/cargo-sqlx \ + /usr/local/cargo/bin/cargo-nextest \ + /usr/local/cargo/bin/cargo-spellcheck \ + /usr/local/cargo/bin/sccache \ + /usr/local/cargo/bin/forge \ + /usr/local/cargo/bin/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. 
COPY --from=solidity-builder /solidity/build/solc/solc /usr/bin/ @@ -133,7 +158,7 @@ RUN apt-get remove valgrind -y # We need valgrind 3.20, which is unavailable in repos or ppa, so we will build it from source RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ tar -xf valgrind-3.20.0.tar.bz2 && \ - cd valgrind-3.20.0 && ./configure && make && make install && \ + cd valgrind-3.20.0 && ./configure && make -j && make install && \ cd ../ && rm -rf valgrind-3.20.0.tar.bz2 && rm -rf valgrind-3.20.0 @@ -141,10 +166,13 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ ENV ZKSYNC_HOME=/usr/src/zksync ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" ENV CI=1 -RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache -FROM rust-lightweight as rust-lightweight-nightly +# If the target is 'main', install the default (stable) Rust toolchain. +FROM rust-lightweight-base as rust-lightweight +RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y + -RUN rustup install nightly-2024-08-01 && \ - rustup default nightly-2024-08-01 +# If the target is nightly, install only the nightly Rust toolchain. +FROM rust-lightweight-base as rust-lightweight-nightly +RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y --default-toolchain nightly-2024-08-01 \ No newline at end of file diff --git a/docs/guides/advanced/15_prover_keys.md b/docs/guides/advanced/15_prover_keys.md index 9f562b49d299..5a3a264e8ddd 100644 --- a/docs/guides/advanced/15_prover_keys.md +++ b/docs/guides/advanced/15_prover_keys.md @@ -118,9 +118,9 @@ friendly hash function (currently Poseidon2). [recursive_circuit_list]: https://github.com/matter-labs/era-zkevm_test_harness/blob/3cd647aa57fc2e1180bab53f7a3b61ec47502a46/circuit_definitions/src/circuit_definitions/recursion_layer/mod.rs#L29 [verification_key_list]: - https://github.com/matter-labs/zksync-era/tree/boojum-integration/prover/vk_setup_data_generator_server_fri/data + https://github.com/matter-labs/zksync-era/tree/6d18061df4a18803d3c6377305ef711ce60317e1/prover/data/keys [env_variables_for_hash]: - https://github.com/matter-labs/zksync-era/blob/boojum-integration/etc/env/base/contracts.toml#L44 + https://github.com/matter-labs/zksync-era/blob/6d18061df4a18803d3c6377305ef711ce60317e1/etc/env/base/contracts.toml#L61 [prover_setup_data]: https://github.com/matter-labs/zksync-era/blob/d2ca29bf20b4ec2d9ec9e327b4ba6b281d9793de/prover/vk_setup_data_generator_server_fri/src/lib.rs#L61 [verifier_computation]: diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/guides/external-node/07_snapshots_recovery.md index 99de05ff2c11..dfbc7a5366c5 100644 --- a/docs/guides/external-node/07_snapshots_recovery.md +++ b/docs/guides/external-node/07_snapshots_recovery.md @@ -16,7 +16,10 @@ Recovery from a snapshot consists of several parts. to take about 1 hour on the mainnet. - **Merkle tree** recovery starts once Postgres is fully recovered. Merkle tree recovery can take about 3 hours on the mainnet. Ordinarily, Merkle tree recovery is a blocker for node synchronization; i.e., the node will not process - blocks newer than the snapshot block until the Merkle tree is recovered. + blocks newer than the snapshot block until the Merkle tree is recovered. If the [treeless mode](10_treeless_mode.md) + is enabled, tree recovery is not performed, and the node will start catching up blocks immediately after Postgres + recovery. This is still true if the tree data fetcher is enabled _together_ with a Merkle tree; tree recovery is + asynchronous in this case. 
- Recovering RocksDB-based **VM state cache** is concurrent with Merkle tree recovery and also depends on Postgres recovery. It takes about 1 hour on the mainnet. Unlike Merkle tree recovery, VM state cache is not necessary for node operation (the node will get the state from Postgres if it is absent), although it considerably speeds up VM @@ -24,7 +27,8 @@ Recovery from a snapshot consists of several parts. After Postgres recovery is completed, the node becomes operational, providing Web3 API etc. It still needs some time to catch up executing blocks after the snapshot (i.e., roughly several hours' worth of blocks / transactions). This may take -order of 1–2 hours on the mainnet. In total, recovery process and catch-up thus should take roughly 5–6 hours. +order of 1–2 hours on the mainnet. In total, recovery process and catch-up thus should take roughly 5–6 hours with a +Merkle tree, or 3–4 hours in the treeless mode / with a tree data fetcher. ## Current limitations diff --git a/etc/contracts-test-data/contracts/counter/counter.sol b/etc/contracts-test-data/contracts/counter/counter.sol index 748ab91aa70f..c0f4bda130d0 100644 --- a/etc/contracts-test-data/contracts/counter/counter.sol +++ b/etc/contracts-test-data/contracts/counter/counter.sol @@ -5,7 +5,7 @@ pragma solidity ^0.8.0; contract Counter { uint256 value; - function increment(uint256 x) public { + function increment(uint256 x) external { value += x; } diff --git a/etc/contracts-test-data/contracts/counter/proxy_counter.sol b/etc/contracts-test-data/contracts/counter/proxy_counter.sol new file mode 100644 index 000000000000..b3bbf9dda93c --- /dev/null +++ b/etc/contracts-test-data/contracts/counter/proxy_counter.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +interface ICounter { + function increment(uint256 x) external; +} + +contract ProxyCounter { + ICounter counter; + + constructor(ICounter _counter) { + counter = _counter; + } + + uint256 lastFarCallCost; + + function increment(uint256 x, uint gasToPass) public { + while (gasleft() > gasToPass) { + // Burn gas so that there's about `gasToPass` left before the external call. + } + uint256 gasBefore = gasleft(); + counter.increment(x); + lastFarCallCost = gasBefore - gasleft(); + } +} diff --git a/etc/contracts-test-data/contracts/failed-call/failed_call.sol b/etc/contracts-test-data/contracts/failed-call/failed_call.sol new file mode 100644 index 000000000000..7a8f43fbd895 --- /dev/null +++ b/etc/contracts-test-data/contracts/failed-call/failed_call.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +contract FailedCall { + bool public success; + bytes1 public data_first_byte; + + constructor() { + address MSG_VALUE_SIMULATOR = 0x0000000000000000000000000000000000008009; + + while (gasleft() > 20000) { + // Burn gas so that there's about 20k left before the external call. 
+ } + + // This call fails because MSG_VALUE_SIMULATOR forcibly takes 27k gas + (bool s, bytes memory data) = MSG_VALUE_SIMULATOR.call( + abi.encodeWithSignature("deadBeef()") + ); + + success = s; + data_first_byte = data[0]; + } +} diff --git a/etc/env/base/fri_prover.toml b/etc/env/base/fri_prover.toml index 1c93752251bc..d09991312ae5 100644 --- a/etc/env/base/fri_prover.toml +++ b/etc/env/base/fri_prover.toml @@ -1,5 +1,5 @@ [fri_prover] -setup_data_path = "crates/bin/vk_setup_data_generator_server_fri/data" +setup_data_path = "data/keys" prometheus_port = 3315 max_attempts = 10 generation_timeout_in_secs = 600 diff --git a/etc/env/base/house_keeper.toml b/etc/env/base/house_keeper.toml index 9596f63d062f..6f86561d1c60 100644 --- a/etc/env/base/house_keeper.toml +++ b/etc/env/base/house_keeper.toml @@ -1,15 +1,2 @@ [house_keeper] l1_batch_metrics_reporting_interval_ms = 10000 -gpu_prover_queue_reporting_interval_ms = 10000 -witness_generator_stats_reporting_interval_ms = 10000 -witness_job_moving_interval_ms = 40000 -prover_job_retrying_interval_ms = 30000 -witness_generator_job_retrying_interval_ms = 30000 -prover_db_pool_size = 2 -prover_stats_reporting_interval_ms = 50000 -proof_compressor_job_retrying_interval_ms = 30000 -proof_compressor_stats_reporting_interval_ms = 10000 -prover_job_archiver_archiving_interval_ms = 1800000 -prover_job_archiver_archive_after_secs = 172800 -fri_gpu_prover_archiver_archiving_interval_ms = 86400000 -fri_gpu_prover_archiver_archive_after_secs = 172800 \ No newline at end of file diff --git a/etc/env/configs/dev_validium_docker.toml b/etc/env/configs/dev_validium_docker.toml index 7e985cb974ab..0d619e9d6a60 100644 --- a/etc/env/configs/dev_validium_docker.toml +++ b/etc/env/configs/dev_validium_docker.toml @@ -1,12 +1,12 @@ -__imports__ = [ "base", "l1-inits/.init.env", "l2-inits/dev_validium_docker.init.env" ] +__imports__ = ["base", "l1-inits/.init.env", "l2-inits/dev_validium_docker.init.env"] -database_url = "postgres://postgres:notsecurepassword@postgres/zksync_local" -database_prover_url = "postgres://postgres:notsecurepassword@postgres/prover_local" -test_database_url = "postgres://postgres:notsecurepassword@host:5433/zksync_local_test" -test_database_prover_url = "postgres://postgres:notsecurepassword@host:5433/prover_local_test" +database_url = "postgres://postgres:notsecurepassword@localhost:5432/zksync_local" +database_prover_url = "postgres://postgres:notsecurepassword@localhost:5432/prover_local" +test_database_url = "postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test" +test_database_prover_url = "postgres://postgres:notsecurepassword@localhost:5433/prover_local_test" # for loadtest -l1_rpc_address = "http://reth:8545" +l1_rpc_address = "http://localhost:8545" [chain.state_keeper] compute_overhead_part = 1.0 @@ -20,10 +20,10 @@ l1_batch_commit_data_generator_mode = "Validium" miniblock_iteration_interval = 50 [eth_sender] -sender_pubdata_sending_mode="Custom" +sender_pubdata_sending_mode = "Custom" [eth_client] -web3_url = "http://reth:8545" +web3_url = "http://localhost:8545" [_metadata] base = ["dev.toml"] diff --git a/etc/env/configs/docker.toml b/etc/env/configs/docker.toml index 2f72e183a84a..b489705324e5 100644 --- a/etc/env/configs/docker.toml +++ b/etc/env/configs/docker.toml @@ -1,18 +1,18 @@ -__imports__ = [ "base", "l1-inits/.init.env", "l2-inits/docker.init.env" ] +__imports__ = ["base", "l1-inits/.init.env", "l2-inits/docker.init.env"] ETH_SENDER_SENDER_PUBDATA_SENDING_MODE = "Calldata" sqlx_offline = true 
-database_url = "postgres://postgres:notsecurepassword@postgres/zksync_local" -database_prover_url = "postgres://postgres:notsecurepassword@postgres/prover_local" -test_database_url = "postgres://postgres:notsecurepassword@host:5433/zksync_local_test" -test_database_prover_url = "postgres://postgres:notsecurepassword@host:5433/prover_local_test" +database_url = "postgres://postgres:notsecurepassword@localhost:5432/zksync_local" +database_prover_url = "postgres://postgres:notsecurepassword@localhost:5432/prover_local" +test_database_url = "postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test" +test_database_prover_url = "postgres://postgres:notsecurepassword@localhost:5433/prover_local_test" # for loadtest -l1_rpc_address = "http://reth:8545" +l1_rpc_address = "http://localhost:8545" [eth_client] -web3_url = "http://reth:8545" +web3_url = "http://localhost:8545" [chain.state_keeper] miniblock_iteration_interval = 50 diff --git a/etc/env/configs/ext-node-docker.toml b/etc/env/configs/ext-node-docker.toml index bc6711e47414..854a9f7d1355 100644 --- a/etc/env/configs/ext-node-docker.toml +++ b/etc/env/configs/ext-node-docker.toml @@ -1,11 +1,11 @@ -__imports__ = [ "configs/ext-node.toml" ] +__imports__ = ["configs/ext-node.toml"] -database_url = "postgres://postgres:notsecurepassword@postgres/_ext_node" -template_database_url = "postgres://postgres:notsecurepassword@postgres/zksync_local" -test_database_url = "postgres://postgres:notsecurepassword@host:5433/zksync_local_test_ext_node" +database_url = "postgres://postgres:notsecurepassword@localhost:5432/_ext_node" +template_database_url = "postgres://postgres:notsecurepassword@localhost:5432/zksync_local" +test_database_url = "postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test_ext_node" [en] -eth_client_url = "http://reth:8545" +eth_client_url = "http://localhost:8545" [_metadata] base = ["ext-node.toml"] diff --git a/etc/env/configs/ext-node-validium-docker.toml b/etc/env/configs/ext-node-validium-docker.toml index 1919233cb525..89aea2fd8cf9 100644 --- a/etc/env/configs/ext-node-validium-docker.toml +++ b/etc/env/configs/ext-node-validium-docker.toml @@ -1,12 +1,12 @@ -__imports__ = [ "configs/ext-node-validium.toml" ] +__imports__ = ["configs/ext-node-validium.toml"] -database_url = "postgres://postgres:notsecurepassword@postgres/_ext_node" -template_database_url = "postgres://postgres:notsecurepassword@postgres/zksync_local" +database_url = "postgres://postgres:notsecurepassword@localhost:5432/_ext_node" +template_database_url = "postgres://postgres:notsecurepassword@localhost:5432/zksync_local" test_database_url = "postgres://postgres:notsecurepassword@host:5433/zksync_local_test_ext_node" [en] l1_batch_commit_data_generator_mode = "Validium" -eth_client_url = "http://reth:8545" +eth_client_url = "http://localhost:8545" [_metadata] base = ["ext-node-validium.toml"] diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 19921cf536c4..864bff15dedf 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -140,7 +140,7 @@ prover: file_backed: file_backed_base_path: artifacts max_retries: 10 - setup_data_path: crates/bin/vk_setup_data_generator_server_fri/data + setup_data_path: data/keys prometheus_port: 3315 max_attempts: 10 generation_timeout_in_secs: 600 @@ -303,19 +303,6 @@ external_price_api_client: house_keeper: l1_batch_metrics_reporting_interval_ms: 10000 - gpu_prover_queue_reporting_interval_ms: 10000 - prover_job_retrying_interval_ms: 
30000 - witness_generator_job_retrying_interval_ms: 30000 - witness_generator_stats_reporting_interval_ms: 10000 - witness_job_moving_interval_ms: 40000 - prover_db_pool_size: 2 - prover_stats_reporting_interval_ms: 5000 - proof_compressor_job_retrying_interval_ms: 30000 - proof_compressor_stats_reporting_interval_ms: 10000 - prover_job_archiver_archiving_interval_ms: 1800000 - prover_job_archiver_archive_after_secs: 172800 - fri_gpu_prover_archiver_archiving_interval_ms: 86400000 - fri_gpu_prover_archiver_archive_after_secs: 172800 prometheus: listener_port: 3314 diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 6d7a6ba3c338..220a75944e02 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -1,7 +1,7 @@ genesis_root: 0xabdb766b18a479a5c783a4b80e12686bc8ea3cc2d8a3050491b701d72370ebb5 genesis_rollup_leaf_index: 54 genesis_batch_commitment: 0x2d00e5f8d77afcebf58a6b82ae56ba967566fe7dfbcb6760319fb0d215d18ffd -genesis_protocol_semantic_version: '0.24.1' +genesis_protocol_semantic_version: '0.24.2' # deprecated genesis_protocol_version: 24 default_aa_hash: 0x01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e32 diff --git a/etc/lint-config/ignore.yaml b/etc/lint-config/ignore.yaml new file mode 100644 index 000000000000..108192b18438 --- /dev/null +++ b/etc/lint-config/ignore.yaml @@ -0,0 +1,26 @@ +files: [ + "KeysWithPlonkVerifier.sol", + "TokenInit.sol", + ".tslintrc.js", + ".prettierrc.js" +] +dirs: [ + "target", + "node_modules", + "volumes", + "build", + "dist", + ".git", + "generated", + "grafonnet-lib", + "prettier-config", + "lint-config", + "cache", + "artifacts", + "typechain", + "binaryen", + "system-contracts", + "artifacts-zk", + "cache-zk", + "contracts/" +] diff --git a/etc/nix/container-tee_prover.nix b/etc/nix/container-tee_prover.nix index 303c91b137cb..cb8ebfb51549 100644 --- a/etc/nix/container-tee_prover.nix +++ b/etc/nix/container-tee_prover.nix @@ -22,13 +22,20 @@ nixsgxLib.mkSGXContainer { loader = { argv = [ entrypoint + "--env-prefix" + "TEE_PROVER_" + "--" "${tee_prover}/bin/zksync_tee_prover" ]; log_level = "error"; env = { - TEE_API_URL.passthrough = true; + TEE_PROVER_API_URL.passthrough = true; + TEE_PROVER_MAX_RETRIES.passthrough = true; + TEE_PROVER_INITIAL_RETRY_BACKOFF_SEC.passthrough = true; + TEE_PROVER_RETRY_BACKOFF_MULTIPLIER.passthrough = true; + TEE_PROVER_MAX_BACKOFF_SEC.passthrough = true; API_PROMETHEUS_LISTENER_PORT.passthrough = true; API_PROMETHEUS_PUSHGATEWAY_URL.passthrough = true; API_PROMETHEUS_PUSH_INTERVAL_MS.passthrough = true; diff --git a/etc/reth/chaindata/reth_config b/etc/reth/chaindata/reth_config index 24e15c4b35bd..2eaf37e59e22 100644 --- a/etc/reth/chaindata/reth_config +++ b/etc/reth/chaindata/reth_config @@ -70,10 +70,37 @@ "E90E12261CCb0F3F7976Ae611A29e84a6A85f424": { "balance": "0x4B3B4CA85A86C47A098A224000000000" }, + "5711E991397FCa8F5651c9Bb6FA06b57e4a4DCC0": { + "balance": "0x4B3B4CA85A86C47A098A224000000000" + }, + "a61464658afeaf65cccaafd3a512b69a83b77618": { + "balance": "0x4B3B4CA85A86C47A098A224000000000" + }, + "0d43eb5b8a47ba8900d84aa36656c92024e9772e": { + "balance": "0x4B3B4CA85A86C47A098A224000000000" + }, + "a13c10c0d5bd6f79041b9835c63f91de35a15883": { + "balance": "0x4B3B4CA85A86C47A098A224000000000" + }, + "8002cd98cfb563492a6fb3e7c8243b7b9ad4cc92": { + "balance": "0x4B3B4CA85A86C47A098A224000000000" + }, + "4f9133d1d3f50011a6859807c837bdcb31aaab13": { + "balance": "0x4B3B4CA85A86C47A098A224000000000" + }, + 
"bd29a1b981925b94eec5c4f1125af02a2ec4d1ca": { + "balance": "0x4B3B4CA85A86C47A098A224000000000" + }, + "edb6f5b4aab3dd95c7806af42881ff12be7e9daa": { + "balance": "0x4B3B4CA85A86C47A098A224000000000" + }, "e706e60ab5dc512c36a4646d719b889f398cbbcb": { "balance": "0x4B3B4CA85A86C47A098A224000000000" }, - "5711E991397FCa8F5651c9Bb6FA06b57e4a4DCC0": { + "e90e12261ccb0f3f7976ae611a29e84a6a85f424": { + "balance": "0x4B3B4CA85A86C47A098A224000000000" + }, + "78192af4ce300352a7d44b17bc2b3a3df545e200": { "balance": "0x4B3B4CA85A86C47A098A224000000000" } }, diff --git a/etc/selector-generator-data/README.md b/etc/selector-generator-data/README.md new file mode 100644 index 000000000000..ddba2769e4f2 --- /dev/null +++ b/etc/selector-generator-data/README.md @@ -0,0 +1,3 @@ +# List of selectors from our contracts + +To regenerate the list, please use the selector_generator tool from core/bin directory. diff --git a/etc/selector-generator-data/selectors.json b/etc/selector-generator-data/selectors.json new file mode 100644 index 000000000000..6ea986e4263a --- /dev/null +++ b/etc/selector-generator-data/selectors.json @@ -0,0 +1,518 @@ +{ + "d0f2c663": "getBatchNumberAndTimestamp", + "2a79c611": "getCommitment", + "8129fc1c": "initialize", + "e2a9d554": "setUpgradeTimestamp", + "84c2ff75": "stmAssetId", + "7ac3a553": "withdrawLegacyBridge", + "e91659ae": "addNewChainIfNeeded", + "06d49e5b": "getPubdataPricingMode", + "1ff5a783": "execute", + "8310f2c6": "transferFundsFromSharedBridge", + "80b41246": "getBlockHashEVM", + "7da01cd6": "executeUpgrade", + "74044673": "addStateTransitionManager", + "82b57749": "forwardedBridgeMint", + "6478d8ed": "chainAdmin", + "4af63f02": "deploy", + "d0707b67": "aggregate", + "e0ab6368": "assetIdIsRegistered", + "27e86d6e": "getLastBlockHash", + "13bc9f20": "isOperationReady", + "4a2e35ba": "withdraw", + "1e4fba05": "getChainRoot", + "762008c2": "executeBatchesSharedBridge", + "155fd27a": "setValueUnderNonce", + "a6ae0aac": "coinbase", + "86d516e8": "getCurrentBlockGasLimit", + "3659cfe6": "upgradeTo", + "fa8f7ea6": "getAllHyperchains", + "7b510fe8": "getAccountInfo", + "40c10f19": "mint", + "e02e1bfd": "chainCount", + "015f58d7": "genesisUpgrade", + "28e439f3": "tryBlockAndAggregate", + "e76db865": "setPubdataPricingMode", + "62f84b24": "sendToL1", + "1c9f0149": "updateChainBalancesFromSharedBridge", + "38720778": "sharedBridge", + "64e130cf": "nativeTokenVault", + "adfca15e": "facetFunctionSelectors", + "af500fb7": "readBytes32", + "7b315630": "upgradeChainFromVersion", + "b6ea1757": "pushNewLeaf", + "e66c8c44": "validatorTimelock", + "4f1ef286": "upgradeToAndCall", + "fe26699e": "getTotalBlocksCommitted", + "805b9869": "executeTransactionFromOutside", + "aa4593dc": "revertReceive", + "64b554ad": "forwardedBridgeBurn", + "ba238947": "getProtocolVersion", + "07f8c636": "multicall", + "39607382": "getTotalBlocksExecuted", + "796b89b9": "getBlockTimestamp", + "9cd939e4": "l2LogsRootHash", + "b298e36b": "push", + "7890e5da": "side", + "5e1ac65f": "hashOperation", + "1806aa18": "getCodeSize", + "d4a4ca0d": "getBlockNumberAndTimestamp", + "06bed036": "setL2Block", + "aa970773": "validateAndPayForPaymasterTransaction", + "6223258e": "setDAValidatorPair", + "728cb93b": "bridgeClaimFailedBurn", + "d6abe642": "getAssetId", + "d2ef1b0e": "storedBatchZero", + "51b3c157": "hyperbridgingEnabled", + "53e61bdc": "processL2RollupDAValidatorOutputHash", + "95d89b41": "symbol", + "a37dc1d4": "forwardedBridgeClaimFailedBurn", + "db1f0bf9": "getTotalBatchesCommitted", + "beda594a": 
"setHyperchain", + "3977d71c": "getAggregatedRoot", + "c4d252f5": "cancel", + "2878fe74": "genesisUpgrade", + "2ab0f529": "isOperationDone", + "5d4edca7": "BRIDGE_HUB", + "d4b9f4fa": "messageRoot", + "fb1a9a57": "getDeploymentNonce", + "bb0fd610": "extendedAccountVersion", + "3cda3351": "create2", + "3a9d7f8d": "stmDeployer", + "db541184": "setShouldRevertOnExecuteBatches", + "74f4f547": "bridgeBurn", + "b852ad36": "l1SharedBridge", + "6ef25c3a": "baseFee", + "eb39e6d5": "stateTransitionManager", + "381c3f13": "checkDA", + "f92ad219": "initialize", + "9fa8826b": "depositHappened", + "01d23d4b": "diamondCut", + "55d35d18": "getValueUnderNonce", + "ee7fb38b": "calculateRoot", + "64d62353": "updateDelay", + "fd3c6b55": "processCalldataDA", + "39b34c6e": "requestBytecodeL1Publication", + "71623274": "l2TransactionBaseCost", + "53b9e632": "assetHandlerAddress", + "c987336c": "upgrade", + "5c975abb": "paused", + "4623c91d": "setValidator", + "4f1e1be0": "storeAccountConstructingCodeHash", + "b0f40a17": "processBatch", + "2c4f2a58": "bridgehubDepositBaseToken", + "ced531eb": "setHashes", + "18160ddd": "totalSupply", + "7cb9357e": "gasPerPubdataByte", + "7877a797": "blockGasLimit", + "cdc4878b": "nodeCount", + "c2eeeebd": "l1Address", + "0f23da43": "revertBatchesSharedBridge", + "e1239cd8": "incrementMinNonceIfEquals", + "8456cb59": "pause", + "9a42c2c2": "zeroPointerTest", + "f9f3ee2d": "setResult", + "7ba8be34": "decodeUint8", + "a635f01d": "delegateCall", + "2f90b184": "L1_CHAIN_ID", + "6c0960f9": "finalizeEthWithdrawal", + "31d50750": "isOperation", + "59ec65a2": "baseToken", + "a9b0d128": "setPriorityTreeStartIndex", + "c4879440": "bridgehubDepositBaseToken", + "823f1d96": "l2TokenProxyBytecodeHash", + "18876a04": "chunkPubdataToBlobs", + "699b0fb9": "bridgeBurn", + "17338945": "unfreezeDiamond", + "8a75bb09": "saveL2LogsRootHash", + "91b19874": "validators", + "63dc94b1": "forceDeploy", + "5a590335": "getDAValidatorPair", + "60144197": "setTokenMultiplierSetter", + "938b5f32": "origin", + "36ba0355": "bridgeMint", + "6dde7209": "l2TokenBeacon", + "bf54096e": "MAX_NUMBER_OF_HYPERCHAINS", + "7e44bc5e": "setImmutables", + "8e8acf87": "getL2BlockNumberAndTimestamp", + "e30c3978": "pendingOwner", + "f5e69a47": "publishCompressedBytecode", + "84da1fb4": "getNewAddressCreate2", + "47fcedb8": "setFeeParams", + "b22dd78e": "storedBatchHash", + "57e6246b": "initialCutHash", + "2b805192": "setNewVersionUpgrade", + "dbfe3e96": "updateSecurityCouncil", + "e03fe177": "getCodeHash", + "02fa5779": "setNewBatch", + "a225efcb": "setPubdataInfo", + "9cc395d0": "bridgeCheckCounterpartAddress", + "868085b1": "getBatchProofPublicInput", + "6a0cd1f5": "removeValidator", + "2ae9c600": "protocolVersion", + "61f91b2e": "initialForceDeploymentHash", + "72425d9d": "getCurrentBlockDifficulty", + "8c2a993e": "bridgeMint", + "b473318e": "l2TransactionBaseCost", + "f851a440": "admin", + "681fe70c": "isEmpty", + "ef3f0bae": "getTotalBatchesVerified", + "ba75bbd8": "front", + "cdffacc6": "facetAddress", + "89f9a072": "validatePubdata", + "66869d49": "changeFeeParams", + "e8b99b1b": "deposit", + "4d4a1eca": "setTokenMultiplier", + "a0803ef7": "currentBlockInfo", + "fb4baf17": "changeFeeParams", + "3591c1a0": "getBridgehub", + "fd791f3c": "getL2DefaultAccountBytecodeHash", + "ec8067c7": "updateNonceOrdering", + "a3912ec8": "receiveEther", + "79823c9a": "getFirstUnprocessedPriorityTx", + "235d9eb5": "setTokenMultiplier", + "dd354a06": "calculateCreate2TokenAddress", + "7efda2ae": "proveL2LeafInclusion", + "f120e6c4": 
"encodeTxDataHash", + "f5f15168": "l2TokenAddress", + "4d2301cc": "getEthBalance", + "ab07b2e9": "getL2GasPrice", + "363bf964": "setAddresses", + "607457f2": "setShouldRevertOnCommitBatches", + "d1ba7e97": "hyperchainAddress", + "841a9d42": "aggregate3Value", + "ea6c029c": "baseTokenGasPriceMultiplierNominator", + "de8fa431": "getSize", + "24a55db9": "markBytecodeAsPublished", + "c438a9f2": "L2_LEGACY_SHARED_BRIDGE", + "ddeaa8e6": "getBatchHash", + "8f31f052": "isWithdrawalFinalized", + "41cf49bb": "prepareChainCommitment", + "5d382700": "create2Account", + "6d9860e1": "l1AssetRouter", + "e1ad1162": "transfer", + "bf1fe420": "setGasPrice", + "a1954fc5": "getTotalPriorityTxs", + "c0a16dda": "setAssetDeploymentTracker", + "4145ca27": "removePriorityQueueFront", + "09e14277": "setStateTransitionManager", + "1f067457": "revertTransfer", + "b8c2f66f": "getTotalBatchesExecuted", + "07ee9355": "l2BridgeAddress", + "095ea7b3": "approve", + "84b0196e": "eip712Domain", + "18b1771f": "getAssetId", + "f85894c5": "forwardedBridgeBurn", + "bd7c5412": "isEthWithdrawalFinalized", + "70a08231": "balanceOf", + "3425eb89": "tokenMultiplierSetter", + "5aa9b6b5": "getRawNonce", + "7ab08472": "finalizeWithdrawalLegacyErc20Bridge", + "205c2878": "withdrawTo", + "ec3d5f88": "setPriorityTxMaxGasLimit", + "8eb7db57": "bridgehubConfirmL2Transaction", + "2a72b707": "bridgehubRequestL2Transaction", + "0f3fa211": "setNativeTokenVault", + "4bed8212": "isWithdrawalFinalized", + "0c56efe9": "initializeV2", + "501e60d5": "setUpgradeDiamondCut", + "c29f093f": "setSTM", + "f2fde38b": "transferOwnership", + "8c5a3445": "general", + "ca8f93f1": "setLegacyBaseTokenAssetId", + "71abd109": "upgrade", + "eced0bf0": "__DEPRECATED_tokenIsRegistered", + "dc8e4b26": "registerSettlementLayer", + "310ab089": "getImmutable", + "19cae462": "difficulty", + "77421056": "setFunctionToCall", + "3997d064": "tryAggregate", + "f1d357e5": "L1_SHARED_BRIDGE", + "952a3ee7": "getERC20Getters", + "29b98c67": "isDiamondStorageFrozen", + "17d7de7c": "getName", + "e81e0ba1": "isFunctionFreezable", + "7ebba672": "setTokenMultiplier", + "6ee1dc20": "validateNonceUsage", + "6a27e8b5": "getSettlementLayer", + "7a28adb2": "proveL2LogInclusion", + "671a7131": "settlementLayer", + "accdd16c": "freezeChain", + "c3bbd2d7": "isFacetFreezable", + "99a88ec4": "upgrade", + "95f11a40": "bridgeInitialize", + "c9f5c932": "requestL2TransactionTwoBridges", + "f1a78aa6": "postTransaction", + "ca65fe79": "finalizeDeposit", + "5518c73b": "getStateTransitionManager", + "b5b18fe5": "processL2Logs", + "969b53da": "l1Bridge", + "e8a71ca9": "forwardedBridgeMint", + "505e6d47": "updateAllLeaves", + "ecf95b8a": "createAccount", + "84d9fedd": "popFront", + "3f4ba83a": "unpause", + "1f98fa08": "createNewChain", + "313ce567": "decimals", + "3ce695e7": "registerSTMAssetOnL1", + "73c58a2d": "publishBlobs", + "f0e9da23": "readAddress", + "e23d2563": "getEraChainId", + "0ec6b0b7": "getPriorityTxMaxGasLimit", + "fdbb0301": "__DEPRECATED_l2BridgeAddress", + "52d1902d": "proxiableUUID", + "97bb3ce9": "tokenAddress", + "5d83b6da": "__DEPRECATED_baseToken", + "966c523e": "blockAndAggregate", + "f4943a20": "protocolVersionDeadline", + "46746c7d": "commitBatchesSharedBridge", + "87d9d023": "verify", + "57f3921f": "stmAssetIdToAddress", + "e516761e": "markFactoryDeps", + "daa51a8c": "pushBack", + "2e1a7d4d": "withdraw", + "af6ed122": "executeUpgrade", + "a6f2c076": "setDataToBeReturnedInFinalizeWithdrawal", + "01eae183": "depositAmount", + "9e8945d2": "verificationKeyHash", + "a3bd0112": 
"genesisUpgrade", + "927c4bf7": "upgradeExternal", + "56079ac8": "sendL2ToL1Log", + "d92f86a2": "setLegacyChainAddress", + "be6f11cf": "setPriorityTxMaxGasLimit", + "7321c485": "dummySetValidator", + "c0991525": "claimFailedDeposit", + "72d74cd7": "reinitializeToken", + "ab93d6f3": "requestL2TransactionToGatewayMailbox", + "3601e63e": "bridgeRecoverFailedTransfer", + "eb672419": "requestL2Transaction", + "af6a2dcd": "getTotalBlocksVerified", + "27eb6c0f": "securityCouncil", + "4c6314f0": "getMarker", + "49a7cc72": "payForTransaction", + "f20265d2": "setRevertTransfer", + "84bc3eb0": "withdrawWithMessage", + "79c4f929": "markBytecodeAsPublished", + "580d6bff": "updateAllNodesAtHeight", + "e5355c75": "getL2SystemContractsUpgradeBatchNumber", + "ca408c23": "bridgehubDeposit", + "6ab8f82e": "proveL2LogInclusion", + "7528c2c6": "applyL1ToL2Alias", + "59890bcb": "setExecutedBatches", + "b19f0ade": "executeUpgradeNoOverlap", + "15f9a2fe": "prepareForPaymaster", + "6e9d7899": "legacyBridge", + "ef0e2ff4": "setChainId", + "e52db4ca": "baseTokenAssetId", + "0f28c97d": "getCurrentBlockTimestamp", + "d0e30db0": "deposit", + "9623609d": "upgradeAndCall", + "5ca1e165": "getRoot", + "fe173b97": "gasPrice", + "a851ae78": "setTxOrigin", + "18717dc1": "setPorterAvailability", + "cbcf2e3c": "isTransactionAllowed", + "c4d66de8": "initialize", + "7c9bd1f3": "publishTimestampDataToL1", + "69c76df2": "readUint32", + "a75b496d": "getAllHyperchainChainIDs", + "f5ba4232": "removeStateTransitionManager", + "42cbb15c": "getBlockNumber", + "607e2cb2": "setRevertReceive", + "328ef4fe": "setBaseTokenGasMultiplierPrice", + "1c50cfea": "addTokenAssetId", + "6d1d8363": "scheduleShadow", + "9cc7f708": "balanceOf", + "933999fb": "deposit", + "c2e047ff": "aggregate3", + "bb7044b6": "stateTransitionManagerIsRegistered", + "d4ce08c2": "addNewChain", + "f34d1868": "setExecutionDelay", + "9caf9bac": "setX", + "f113c88b": "createNewChain", + "1cc5d103": "setPorterAvailability", + "cdf25430": "L1_ASSET_ROUTER", + "def9d6af": "protocolVersionIsActive", + "c21a38e2": "proveL2MessageInclusion", + "e543e5bf": "setChainCreationParams", + "4be99e1d": "getCurrentPubdataCost", + "74f4d30d": "storedBlockHash", + "f8f7cd76": "validateTransaction", + "7a0ed627": "facets", + "38a78092": "increaseMinNonce", + "8cb7f3d0": "forceDeployOnAddresses", + "a2d5a0cc": "proveBatchesSharedBridge", + "301e7765": "getChainAdmin", + "fb644fc5": "addChainBatchRoot", + "6006d8b5": "verifyCompressedStateDiffs", + "39509351": "increaseAllowance", + "51cff8d9": "withdraw", + "8ffe1b81": "setBridgeHubAddress", + "95ce3e93": "decodeString", + "09824a80": "registerToken", + "d86970d8": "getL2BootloaderBytecodeHash", + "a31ee5b0": "initialize", + "0d4651aa": "storeAccountConstructedCodeHash", + "9a188371": "requestL2TransactionDirect", + "ed1d7d97": "chainIndexToId", + "c63c4e9b": "minDelay", + "546b6d2a": "SHARED_BRIDGE", + "187598a5": "getNewAddressCreate", + "bf529569": "setFreezability", + "cfe7af7c": "finalizeDeposit", + "bcf284e5": "executeTransaction", + "3437949a": "l1GenesisUpgrade", + "f54266a2": "l1TokenAddress", + "c9d1c097": "stmAssetIdFromChainId", + "39d7d4aa": "getPriorityTreeRoot", + "41c841c3": "L1_WETH_TOKEN", + "19fa7f62": "claimFailedDeposit", + "5c60da1b": "implementation", + "dd62ed3e": "allowance", + "9cd45184": "chainBalance", + "7958004c": "getOperationState", + "8cf2b2f0": "uncheckedInc", + "715018a6": "renounceOwnership", + "30bda03e": "setL1Erc20Bridge", + "c0d5b949": "getCurrentPubdataSpent", + "4de2e468": "getRawCodeHash", + 
"7ecebe00": "nonces", + "0e18b681": "acceptAdmin", + "d0468156": "getPendingAdmin", + "d83e4e03": "genesisUpgrade", + "49eb3b50": "getTransactionHashes", + "ebf0c717": "root", + "8da5cb5b": "owner", + "11a2ccc1": "finalizeWithdrawal", + "1dd93b33": "keccakValidationTest", + "f088ccdc": "callCodeOracle", + "aad74262": "setProtocolVersionDeadline", + "72c84445": "callKeccak", + "21f603d7": "setTransactionFilterer", + "52ef6b2c": "facetAddresses", + "9e6ea417": "depositLegacyErc20Bridge", + "960dcf24": "getBaseTokenAssetId", + "a888cc3a": "bridgehubRequestL2TransactionOnGateway", + "c7ca373c": "initFromCommitment", + "548a5a33": "setAssetHandlerAddressThisChain", + "402efc91": "stateTransitionManager", + "7b30c8da": "getL2SystemContractsUpgradeTxHash", + "0ef26743": "height", + "79ba5097": "acceptOwnership", + "584b153e": "isOperationPending", + "06fdde03": "name", + "e717bab7": "proveL1ToL2TransactionStatusViaGateway", + "a8b0574e": "getCurrentBlockCoinbase", + "30e5ccbd": "incrementTxNumberInBatch", + "ef011dff": "ERA_CHAIN_ID", + "f8c1f0d2": "upgradeChainFromVersion", + "f3b7dead": "getProxyAdmin", + "f26f3c8f": "proveL2MessageInclusion", + "3558c188": "executeBatches", + "bcd1b23d": "updateFullTree", + "3a3f36f9": "codeOracleTest", + "1de72e34": "baseTokenGasPriceMultiplierDenominator", + "81d100a3": "scheduleTransparent", + "85e4e16a": "assetDeploymentTracker", + "204e1c7a": "getProxyImplementation", + "d566afd3": "createBatchCommitment", + "70f5c679": "setMessageRoot", + "07168226": "deployBeaconProxy", + "7b574586": "publishedBlobCommitments", + "fcc73360": "updateLeaf", + "631f4bac": "getPriorityQueueSize", + "3e64a696": "getBasefee", + "facd743b": "isValidator", + "7fb67816": "setValidatorTimelock", + "ee82ac5e": "getBlockHash", + "6e9960c3": "getAdmin", + "98acd7a6": "getBaseToken", + "06e7517b": "appendTransactionToCurrentL2Block", + "b993549e": "getCommittedBatchTimestamp", + "23dc4a09": "keccakPerformUpgrade", + "cf347e17": "setValidator", + "3408e470": "getChainId", + "ae1f6aaf": "l2Bridge", + "c2e90293": "bridgeRecoverFailedTransfer", + "86b7f856": "publishPubdataAndClearState", + "b292f5f1": "proveL1ToL2TransactionStatus", + "7a592065": "calculateRoot", + "a5277a02": "initialize", + "ef939455": "keccakUpgradeTest", + "3644e515": "DOMAIN_SEPARATOR", + "306395c6": "incrementDeploymentNonce", + "b277f199": "uncheckedAdd", + "6fadcf72": "forward", + "ae65def1": "node", + "e0bf0850": "setShouldRevertOnProveBatches", + "a457c2d7": "decreaseAllowance", + "9f3f89dc": "getZero", + "4dd18bf5": "setPendingAdmin", + "33ce93fe": "getProtocolVersion", + "c87325f1": "finalizeWithdrawal", + "40a434d5": "transferTokenToNTV", + "e9420f8c": "whitelistedSettlementLayers", + "3f704d2a": "setAssetHandlerAddress", + "ede25608": "protocolVersionToUpgradeTimestamp", + "042901c7": "proveL1ToL2TransactionStatus", + "cab7e8eb": "isNonceUsed", + "5aa6fa1f": "NATIVE_TOKEN_VAULT", + "b8776d4d": "chainRegistered", + "8fbb3711": "claimFailedDepositLegacyErc20Bridge", + "8dd14802": "setBridge", + "b3160bad": "executeBatchesSharedBridge", + "f5c1182c": "getSemverProtocolVersion", + "8b257989": "executionDelay", + "588570a5": "initialize", + "4cd40a02": "setLegacyTokenAssetId", + "d124dc4f": "send", + "23b872dd": "transferFrom", + "086a56f8": "getBaseTokenBridge", + "689992b3": "undoL1ToL2Alias", + "03c5d8af": "forwardTransactionOnGateway", + "48ceb85e": "chainIndex", + "ba334825": "hyperchain", + "b1fde1a8": "sharedTree", + "7069d0c0": "executeInstant", + "c2aaf9c4": "receiveEth", + "2986c0e5": "index", + 
"b5872958": "timestamps", + "c2e4ff97": "markAccountCodeHashAsConstructed", + "9c4d535b": "create", + "923b3b56": "forceDeployOnAddress", + "3635f3e6": "resetTxNumberInBatch", + "19698bc9": "infiniteFunction", + "315fff4e": "THIS_ADDRESS", + "52c9eacb": "upgradeCutHash", + "18e3a941": "getVerifierParams", + "29f172ad": "unsafeOverrideBatch", + "4b561753": "addValidator", + "a9059cbb": "transfer", + "949431dc": "approvalBased", + "8f283970": "changeAdmin", + "85df51fd": "blockHash", + "dead6f7f": "getHyperchain", + "896909dc": "getMinNonce", + "7eff275e": "changeProxyAdmin", + "27ae4c16": "freezeDiamond", + "566338a9": "getL1TokenAddress", + "8ac84c0e": "txNumberInBlock", + "53ce2061": "revertBatches", + "9a8a0592": "chainId", + "f5407abe": "setValues", + "46657fe9": "getVerifier", + "484f0505": "getHyperchainLegacy", + "b760faf9": "depositTo", + "5de097b1": "nullifyChainBalanceByNTV", + "e8295588": "zeros", + "f90eb963": "getPorterAvailability", + "57180981": "updateAccountVersion", + "579952fc": "transferFromTo", + "d505accf": "permit", + "e02da327": "readUint256", + "51d218f7": "unfreezeChain", + "8466d8d1": "getBridgeHubAddress", + "b381724e": "setFeeParams", + "d9caed12": "withdraw", + "9d1b5a81": "getL2SystemContractsUpgradeBlockNumber" +} \ No newline at end of file diff --git a/etc/upgrades/1720794961-decentralize-governance/common.json b/etc/upgrades/1720794961-decentralize-governance/common.json new file mode 100644 index 000000000000..655d2c435f59 --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/common.json @@ -0,0 +1,5 @@ +{ + "name": "decentralize-governance", + "creationTimestamp": 1720794961, + "protocolVersion": "0.24.2" +} \ No newline at end of file diff --git a/etc/upgrades/1720794961-decentralize-governance/mainnet/transactions.json b/etc/upgrades/1720794961-decentralize-governance/mainnet/transactions.json new file mode 100644 index 000000000000..cd292b92d4ca --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/mainnet/transactions.json @@ -0,0 +1,72 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x66ab923f" + }, + "factoryDeps": [], + "newProtocolVersion": 103079215106, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0x4d376798Ba8F69cEd59642c3AE8687c7457e855d", + "protocolVersionSemVer": "0.24.2", + "packedProtocolVersion": 103079215106, + "upgradeTimestamp": "1722520127", + "stmScheduleTransparentOperation": 
"0x2c4319170000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000c2ee6b6af7d616f6e27ce7f4a451aedc2b0f5f5c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stmExecuteOperation": "0x74da756b000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000c2ee6b6af7d616f6e27ce7f4a451aedc2b0f5f5c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000
0000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "scheduleTransparentOperation": "0x2c431917000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a000324000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f0000000000000000000000000000000000000000000000000000001800000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeOperation": "0x74da756b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a000324000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f0000000000000000000000000000000000000000000000000000001800000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "newExecuteChainUpgradeCalldata": "0x69340beb000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a000324000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f0000000000000000000000000000000000000000000000000000001800000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [], + "initAddress": "0x4d376798Ba8F69cEd59642c3AE8687c7457e855d", + "initCalldata": "0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000000000000000000000
0000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "governanceOperation": { + "calls": [ + { + "target": "0x32400084C286CF3E17e7B677ea9583e60a000324", + "value": 0, + "data": "0xfc57565f0000000000000000000000000000000000000000000000000000001800000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + ], + "predecessor": "0x0000000000000000000000000000000000000000000000000000000000000000", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "legacyScheduleOperation": "0x2c431917000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a0003240000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d941000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000
00000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "legacyExecuteOperation": "0x74da756b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a0003240000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d941000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/upgrades/1720794961-decentralize-governance/stage-proofs/transactions.json b/etc/upgrades/1720794961-decentralize-governance/stage-proofs/transactions.json new file mode 100644 index 000000000000..61abc87f040b --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/stage-proofs/transactions.json @@ -0,0 +1,72 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x66a375e5" + }, + "factoryDeps": [], + "newProtocolVersion": 103079215106, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "protocolVersionSemVer": "0.24.2", + "packedProtocolVersion": 103079215106, + "upgradeTimestamp": "1721988581", + "stmScheduleTransparentOperation": 
"0x2c4319170000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000925dd0bc14552b0b261ca8a23ad26df9c6f2c8ba0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stmExecuteOperation": "0x74da756b000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000925dd0bc14552b0b261ca8a23ad26df9c6f2c8ba0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e5000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000
0000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "scheduleTransparentOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e5000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "newExecuteChainUpgradeCalldata": "0x69340beb00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e500000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [], + "initAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "initCalldata": "0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e5000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000000000000000000000
0000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "governanceOperation": { + "calls": [ + { + "target": "0x5BBdEDe0F0bAc61AA64068b60379fe32ecc0F96C", + "value": 0, + "data": "0xfc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + ], + "predecessor": "0x0000000000000000000000000000000000000000000000000000000000000000", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "legacyScheduleOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e500000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000
00000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "legacyExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/upgrades/1720794961-decentralize-governance/stage/transactions.json b/etc/upgrades/1720794961-decentralize-governance/stage/transactions.json new file mode 100644 index 000000000000..ff030ae9f0d7 --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/stage/transactions.json @@ -0,0 +1,72 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x669123f2" + }, + "factoryDeps": [], + "newProtocolVersion": 103079215106, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "protocolVersionSemVer": "0.24.2", + "packedProtocolVersion": 103079215106, + "upgradeTimestamp": "1720787954", + "stmScheduleTransparentOperation": 
"0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000008b448ac7cd0f18f3d8464e2645575772a26a3b6b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stmExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000008b448ac7cd0f18f3d8464e2645575772a26a3b6b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f2000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000
0000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "scheduleTransparentOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef6000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f2000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef6000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "newExecuteChainUpgradeCalldata": "0x69340beb00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef6000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f200000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [], + "initAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "initCalldata": "0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f2000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000000000000000000000
0000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "governanceOperation": { + "calls": [ + { + "target": "0x6d6e010A2680E2E5a3b097ce411528b36d880EF6", + "value": 0, + "data": "0xfc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + ], + "predecessor": "0x0000000000000000000000000000000000000000000000000000000000000000", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "legacyScheduleOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f200000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000
00000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "legacyExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/upgrades/1720794961-decentralize-governance/testnet/transactions.json b/etc/upgrades/1720794961-decentralize-governance/testnet/transactions.json new file mode 100644 index 000000000000..19187138aec3 --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/testnet/transactions.json @@ -0,0 +1,72 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x66a21f2e" + }, + "factoryDeps": [], + "newProtocolVersion": 103079215106, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "protocolVersionSemVer": "0.24.2", + "packedProtocolVersion": 103079215106, + "upgradeTimestamp": "1721900846", + "stmScheduleTransparentOperation": 
"0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000004e39e90746a9ee410a8ce173c7b96d3afed444a50000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stmExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000004e39e90746a9ee410a8ce173c7b96d3afed444a50000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000
0000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "scheduleTransparentOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "newExecuteChainUpgradeCalldata": "0x69340beb00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [], + "initAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "initCalldata": "0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000000000000000000000
0000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "governanceOperation": { + "calls": [ + { + "target": "0x9A6DE0f62Aa270A8bCB1e2610078650D539B1Ef9", + "value": 0, + "data": "0xfc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + ], + "predecessor": "0x0000000000000000000000000000000000000000000000000000000000000000", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "legacyScheduleOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000
00000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "legacyExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/utils/src/file-configs.ts b/etc/utils/src/file-configs.ts index 1675745bca5d..374bf53f6be9 100644 --- a/etc/utils/src/file-configs.ts +++ b/etc/utils/src/file-configs.ts @@ -2,18 +2,11 @@ import * as path from 'path'; import * as fs from 'fs'; import * as yaml from 'yaml'; -export function shouldLoadConfigFromFile() { +export type FileConfig = { loadFromFile: false; chain?: undefined } | { loadFromFile: true; chain: string }; + +export function shouldLoadConfigFromFile(): FileConfig { const chain = process.env.CHAIN_NAME; - if (chain) { - return { - loadFromFile: true, - chain - } as const; - } else { - return { - loadFromFile: false - } as const; - } + return chain ? { loadFromFile: true, chain } : { loadFromFile: false }; } export const configNames = [ @@ -39,6 +32,19 @@ export function loadEcosystem(pathToHome: string) { ); } +export function loadChainConfig(pathToHome: string, chain: string) { + const configPath = path.join(pathToHome, 'chains', chain, '/ZkStack.yaml'); + + if (!fs.existsSync(configPath)) { + return []; + } + return yaml.parse( + fs.readFileSync(configPath, { + encoding: 'utf-8' + }) + ); +} + export function loadConfig({ pathToHome, chain, diff --git a/etc/utils/src/kill.ts b/etc/utils/src/kill.ts new file mode 100644 index 000000000000..7fdab85afadd --- /dev/null +++ b/etc/utils/src/kill.ts @@ -0,0 +1,19 @@ +import { promisify } from 'node:util'; +import { exec } from 'node:child_process'; + +export async function killPidWithAllChilds(pid: number, signalNumber: number) { + let childs = [pid]; + while (true) { + try { + let child = childs.at(-1); + childs.push(+(await promisify(exec)(`pgrep -P ${child}`)).stdout); + } catch (e) { + break; + } + } + // We always run the test using additional tools, that means we have to kill not the main process, but the child process + for (let i = childs.length - 1; i >= 0; i--) { + console.log(`kill ${childs[i]}`); + await promisify(exec)(`kill -${signalNumber} ${childs[i]}`); + } +} diff --git a/etc/utils/src/logs.ts b/etc/utils/src/logs.ts new file mode 100644 index 000000000000..7db54ef8600c --- /dev/null +++ b/etc/utils/src/logs.ts @@ -0,0 +1,11 @@ +import path from 'path'; +import fs from 'node:fs/promises'; + +const pathToHome = path.join(__dirname, '../../..'); + +export async function logsTestPath(chain: string | undefined, relativePath: string, name: string): Promise { + chain = chain ? chain! 
: 'default'; + let dir = path.join(pathToHome, relativePath, chain); + await fs.mkdir(dir, { recursive: true }); + return path.join(dir, name); +} diff --git a/etc/utils/src/server.ts b/etc/utils/src/server.ts deleted file mode 100644 index 94184f0db9b6..000000000000 --- a/etc/utils/src/server.ts +++ /dev/null @@ -1,23 +0,0 @@ -import { background } from '.'; - -// TODO: change to use `zk_inception` once migration is complete -const BASE_COMMAND = 'zk_inception server'; -const BASE_COMMAND_WITH_ZK = 'zk server'; - -export function runServerInBackground({ - components, - stdio, - cwd, - useZkInception -}: { - components?: string[]; - stdio: any; - cwd?: Parameters[0]['cwd']; - useZkInception?: boolean; -}) { - let command = useZkInception ? BASE_COMMAND : BASE_COMMAND_WITH_ZK; - if (components && components.length > 0) { - command += ` --components=${components.join(',')}`; - } - background({ command, stdio, cwd }); -} diff --git a/flake.lock b/flake.lock index e217d37664cd..e1905f2a1f65 100644 --- a/flake.lock +++ b/flake.lock @@ -360,11 +360,11 @@ "snowfall-lib": "snowfall-lib_2" }, "locked": { - "lastModified": 1719916365, - "narHash": "sha256-RzCFbGAHq6rTY4ctrmazGIx59qXtfrVfEnIe+L0leTo=", + "lastModified": 1723120465, + "narHash": "sha256-sWu5lKy71hHnSwydhwzG2XgSehjvLfK2iuUtNimvGkg=", "owner": "matter-labs", "repo": "nixsgx", - "rev": "0309a20ee5bf12b7390aa6795409b448420e80f2", + "rev": "b080c32f2aa8b3d4b4bc4356a8a513279b6f82ab", "type": "github" }, "original": { @@ -623,11 +623,11 @@ "vault-auth-tee-flake": "vault-auth-tee-flake" }, "locked": { - "lastModified": 1723034739, - "narHash": "sha256-bu4XvqwsPUzfMzk5t10wyHliItfH7FOk42V0CIwl4lg=", + "lastModified": 1725354393, + "narHash": "sha256-RSiDY3sr0hdlydO3cYtidjVx+OlqIsmcnvsSDSGQPF0=", "owner": "matter-labs", "repo": "teepot", - "rev": "4ed311a16a72521f79418216ad29e6eed8db347d", + "rev": "2c21d0161e43dc7a786787c89b84ecd6e8857106", "type": "github" }, "original": { diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index 4aaed4186d75..dfea3a3bfc35 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -4,7 +4,8 @@ import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, AdminFacetFactory, GovernanceFactory, - StateTransitionManagerFactory + StateTransitionManagerFactory, + ChainAdminFactory } from 'l1-contracts/typechain'; import { FacetCut } from 'l1-contracts/src.ts/diamondCut'; import { IZkSyncFactory } from '../pre-boojum/IZkSyncFactory'; @@ -207,6 +208,19 @@ function prepareGovernanceTxs(target: string, data: BytesLike): GovernanceTx { }; } +function prepareChainAdminCalldata(target: string, data: BytesLike): string { + const call = { + target: target, + value: 0, + data: data + }; + + const chainAdmin = new ChainAdminFactory(); + const calldata = chainAdmin.interface.encodeFunctionData('multicall', [[call], true]); + + return calldata; +} + export function prepareTransparentUpgradeCalldataForNewGovernance( oldProtocolVersion, oldProtocolVersionDeadline, @@ -249,6 +263,8 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( operation: governanceOperation } = prepareGovernanceTxs(zksyncAddress, diamondProxyUpgradeCalldata); + const newExecuteChainUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); + const legacyScheduleTransparentOperation = adminFacet.interface.encodeFunctionData('executeUpgrade', [diamondCut]); const { scheduleCalldata: 
legacyScheduleOperation, executeCalldata: legacyExecuteOperation } = prepareGovernanceTxs( zksyncAddress, @@ -260,6 +276,7 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( stmExecuteOperation, scheduleTransparentOperation, executeOperation, + newExecuteChainUpgradeCalldata, diamondCut, governanceOperation, legacyScheduleOperation, diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 27de68d1d98d..a100d1231da6 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -1,4 +1,4 @@ -import { Command } from 'commander'; +import {Command} from 'commander'; import * as utils from 'utils'; const IMAGES = [ @@ -31,7 +31,7 @@ async function dockerCommand( dockerOrg: string = 'matterlabs' ) { // Generating all tags for containers. We need 2 tags here: SHA and SHA+TS - const { stdout: COMMIT_SHORT_SHA }: { stdout: string } = await utils.exec('git rev-parse --short HEAD'); + const {stdout: COMMIT_SHORT_SHA}: { stdout: string } = await utils.exec('git rev-parse --short HEAD'); // COMMIT_SHORT_SHA returns with newline, so we need to trim it const imageTagShaTS: string = process.env.IMAGE_TAG_SUFFIX ? process.env.IMAGE_TAG_SUFFIX @@ -114,7 +114,7 @@ async function _build(image: string, tagList: string[], dockerOrg: string, platf if (platform != '') { buildArgs += `--platform=${platform} `; } - if (image === 'prover-gpu-fri') { + if (image === 'prover-gpu-fri' || image == 'proof-fri-gpu-compressor') { const cudaArch = process.env.CUDA_ARCH; buildArgs += `--build-arg CUDA_ARCH='${cudaArch}' `; } @@ -126,6 +126,8 @@ async function _build(image: string, tagList: string[], dockerOrg: string, platf } buildArgs += extraArgs; + console.log("Build args: ", buildArgs); + const buildCommand = `DOCKER_BUILDKIT=1 docker buildx build ${tagsToBuild}` + (buildArgs ? ` ${buildArgs}` : '') + diff --git a/infrastructure/zk/src/prover_setup.ts b/infrastructure/zk/src/prover_setup.ts index 5a17c9683742..0ef3515cc750 100644 --- a/infrastructure/zk/src/prover_setup.ts +++ b/infrastructure/zk/src/prover_setup.ts @@ -204,7 +204,7 @@ async function downloadDefaultSetupKeys(proverType: ProverType, region: string) ); await utils.spawn( - `cp -r ${process.env.ZKSYNC_HOME}/prover/vk_setup_data_generator_server_fri/data/* ${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/` + `cp -r ${process.env.ZKSYNC_HOME}/prover/data/keys/* ${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/` ); } diff --git a/prover/CHANGELOG.md b/prover/CHANGELOG.md index 4df2039589ea..0201ce4a920f 100644 --- a/prover/CHANGELOG.md +++ b/prover/CHANGELOG.md @@ -1,5 +1,27 @@ # Changelog +## [16.5.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.4.0...prover-v16.5.0) (2024-08-28) + + +### Features + +* **prover_cli:** Add test for status, l1 and config commands. 
([#2263](https://github.com/matter-labs/zksync-era/issues/2263)) ([6a2e3b0](https://github.com/matter-labs/zksync-era/commit/6a2e3b05b7d9c9e8b476fb207631c2285e1bd881)) +* **prover_cli:** Stuck status ([#2441](https://github.com/matter-labs/zksync-era/issues/2441)) ([232a817](https://github.com/matter-labs/zksync-era/commit/232a817a73fa842ca4b3be419bc775c85204901e)) +* **prover:** Add ProverJobMonitor ([#2666](https://github.com/matter-labs/zksync-era/issues/2666)) ([e22cfb6](https://github.com/matter-labs/zksync-era/commit/e22cfb6cffd2c4b2ad1ec3f3f433616fcd738511)) +* **prover:** parallelized memory queues simulation in BWG ([#2652](https://github.com/matter-labs/zksync-era/issues/2652)) ([b4ffcd2](https://github.com/matter-labs/zksync-era/commit/b4ffcd237ee594fc659ccfa96668868f5a87d5e3)) +* Provide easy prover setup ([#2683](https://github.com/matter-labs/zksync-era/issues/2683)) ([30edda4](https://github.com/matter-labs/zksync-era/commit/30edda404193938fbd55815bed164b5321d7c642)) + + +### Bug Fixes + +* **prover_cli:** Remove congif file check ([#2695](https://github.com/matter-labs/zksync-era/issues/2695)) ([2f456f0](https://github.com/matter-labs/zksync-era/commit/2f456f05937dec62d6a10cec8c948a2915650b92)) +* **prover_cli:** Update prover cli README ([#2700](https://github.com/matter-labs/zksync-era/issues/2700)) ([5a9bbb3](https://github.com/matter-labs/zksync-era/commit/5a9bbb3ccf900cea738290ceed2b1ed78908990c)) +* **prover:** change bucket for RAM permutation witnesses ([#2672](https://github.com/matter-labs/zksync-era/issues/2672)) ([8b4cbf4](https://github.com/matter-labs/zksync-era/commit/8b4cbf43e52203aac829324aa48450575b70c656)) +* **prover:** fail when fri prover job is not found ([#2711](https://github.com/matter-labs/zksync-era/issues/2711)) ([8776875](https://github.com/matter-labs/zksync-era/commit/87768755e8653e4be5f29945b56fd05a5246d5a8)) +* **prover:** Revert use of spawn_blocking in LWG/NWG ([#2682](https://github.com/matter-labs/zksync-era/issues/2682)) ([edfcc7d](https://github.com/matter-labs/zksync-era/commit/edfcc7dbb7fb60f0f42fff4f3d350974128127b4)) +* **prover:** speed up LWG and NWG ([#2661](https://github.com/matter-labs/zksync-era/issues/2661)) ([6243399](https://github.com/matter-labs/zksync-era/commit/6243399a9ebee88a80fbc6c7e794519712f6e955)) +* **vm:** Fix used bytecodes divergence ([#2741](https://github.com/matter-labs/zksync-era/issues/2741)) ([923e33e](https://github.com/matter-labs/zksync-era/commit/923e33e81bba83f72b97ca9590c5cdf2da2a311b)) + ## [16.4.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.3.0...prover-v16.4.0) (2024-08-16) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index c510198ab65b..d29f0110f217 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -45,7 +45,7 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", "version_check", "zerocopy", @@ -355,7 +355,7 @@ checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", @@ -407,54 +407,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bellman_ce" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea340d5c1394ee4daf4415dd80e06f74e0ad9b08e21f73f6bb1fa3a9dfae80d" -dependencies = [ - "arrayvec 0.7.4", - "bit-vec", - 
"blake2s_const 0.7.0", - "blake2s_simd", - "byteorder", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "futures 0.3.30", - "hex", - "lazy_static", - "num_cpus", - "pairing_ce", - "rand 0.4.6", - "serde", - "smallvec", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "bellman_ce" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aab6627603565b664e6c643a1dc7ea8bbff25b776f5fecd80ac88308fc7007b" -dependencies = [ - "arrayvec 0.7.4", - "bit-vec", - "blake2s_const 0.8.0", - "blake2s_simd", - "byteorder", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "futures 0.3.30", - "hex", - "lazy_static", - "num_cpus", - "pairing_ce", - "rand 0.4.6", - "serde", - "smallvec", - "tiny-keccak 1.5.0", -] - [[package]] name = "bigdecimal" version = "0.4.5" @@ -500,27 +452,6 @@ dependencies = [ "which", ] -[[package]] -name = "bindgen" -version = "0.65.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", - "prettyplease", - "proc-macro2 1.0.85", - "quote 1.0.36", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.66", -] - [[package]] name = "bindgen" version = "0.69.4" @@ -629,28 +560,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "blake2s_const" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39d933cb38939f885001867874c65699c36f30f0c78aae9f4c9f01b3e4b306a" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "blake2s_const" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1db04f0f5f88d8c95977159949b23d2ed24d33309901cf7f7e48ed40f36de667" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - [[package]] name = "blake2s_simd" version = "0.5.11" @@ -701,18 +610,17 @@ dependencies = [ [[package]] name = "boojum" -version = "0.2.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df88daa33db46d683967ca09a4f04817c38950483f2501a771d497669a8a4bb1" +checksum = "68ec2f007ff8f90cc459f03e9f30ca1065440170f013c868823646e2e48d0234" dependencies = [ "arrayvec 0.7.4", "bincode", "blake2 0.10.6", "const_format", "convert_case", - "crossbeam 0.8.4", + "crossbeam", "crypto-bigint 0.5.5", - "cs_derive", "derivative", "ethereum-types", "firestorm", @@ -720,7 +628,6 @@ dependencies = [ "lazy_static", "num-modular", "num_cpus", - "pairing_ce", "rand 0.8.5", "rayon", "serde", @@ -729,13 +636,15 @@ dependencies = [ "smallvec", "tracing", "unroll", + "zksync_cs_derive", + "zksync_pairing", ] [[package]] name = "boojum-cuda" -version = "0.150.4" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c861b4baec895cb8e53b10825407f0844b0eafda2ac79e7f02de95439f0f1e74" +checksum = "ac7735446f2263e8d12435fc4d5a02c7727838eaffc7c518a961b3e839fb59e7" dependencies = [ "boojum", "cmake", @@ -826,17 +735,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - 
[[package]] name = "cc" version = "1.1.14" @@ -863,12 +761,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -898,12 +790,12 @@ dependencies = [ [[package]] name = "circuit_definitions" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffaa17c1585fbf010b9340bb1fd7f4c4eedec2c15cb74a72162fd2d16435d55" +checksum = "9b532214f063e5e0ee5c0fc1d3afd56dec541efa68b8985f14cc55cc324f4c48" dependencies = [ - "circuit_encodings 0.150.4", - "crossbeam 0.8.4", + "circuit_encodings 0.150.5", + "crossbeam", "derivative", "seq-macro", "serde", @@ -948,82 +840,82 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2593c02ad6b4b31ba63506c3f807f666133dd36bf47422f99b1d2947cf3c8dc1" +checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" dependencies = [ "derivative", "serde", - "zk_evm 0.150.4", - "zkevm_circuits 0.150.4", + "zk_evm 0.150.5", + "zkevm_circuits 0.150.5", ] [[package]] name = "circuit_sequencer_api" -version = "0.133.0" +version = "0.133.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a87dc7bee6630d4954ac7982eb77e2007476662250cf18e5c460bbc5ee435f1" +checksum = "eb959b1f8c6bbd8be711994d182e85452a26a5d2213a709290b71c8262af1331" dependencies = [ - "bellman_ce 0.7.0", "derivative", "rayon", "serde", "zk_evm 0.133.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.140.0" +version = "0.140.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b5138e6524c73e6d49fc1d0822b26e62a8d78b2c07e4e1c56061a447c10bec0" +checksum = "fa5f22311ce609d852d7d9f4943535ea4610aeb785129ae6ff83d5201c4fb387" dependencies = [ - "bellman_ce 0.7.0", "circuit_encodings 0.140.1", "derivative", "rayon", "serde", "zk_evm 0.140.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.141.1" +version = "0.141.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55a257b31a8ea1c1723cab4fb5661c6b4c0ebe022d4b73bea9eb7c9150bd3bc1" +checksum = "4c47c71d6ba83a8beb0af13af70beffd627f5497caf3d44c6f96363e788b07ea" dependencies = [ - "bellman_ce 0.8.0", "circuit_encodings 0.141.1", "derivative", "rayon", "serde", "zk_evm 0.141.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.142.0" +version = "0.142.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d861a7a9b8df9389c63092985fc993c46954771da86462d7cab8cbf55a6497" +checksum = "e264723359e6a1aad98110bdccf1ae3ad596e93e7d31da9e40f6adc07e4add54" dependencies = [ - "bellman_ce 0.7.0", "circuit_encodings 0.142.1", "derivative", "rayon", "serde", "zk_evm 0.141.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d1a86b9c2207f3bb2dff5f00d1af1cb95004b6d07e9bacb6519fe08f12c04b" +checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" dependencies = [ - "bellman_ce 0.7.0", - "circuit_encodings 0.150.4", + "circuit_encodings 0.150.5", "derivative", "rayon", "serde", + "zksync_bellman", ] [[package]] @@ 
-1148,7 +1040,7 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ - "crossbeam-utils 0.8.20", + "crossbeam-utils", ] [[package]] @@ -1257,21 +1149,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crossbeam" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-channel 0.4.4", - "crossbeam-deque 0.7.4", - "crossbeam-epoch 0.8.2", - "crossbeam-queue 0.2.3", - "crossbeam-utils 0.7.2", + "cfg-if", ] [[package]] @@ -1280,21 +1158,11 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" dependencies = [ - "crossbeam-channel 0.5.13", - "crossbeam-deque 0.8.5", - "crossbeam-epoch 0.9.18", - "crossbeam-queue 0.3.11", - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", ] [[package]] @@ -1303,18 +1171,7 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-utils", ] [[package]] @@ -1323,23 +1180,8 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "crossbeam-epoch 0.9.18", - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] @@ -1348,18 +1190,7 @@ version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-utils", ] [[package]] @@ -1368,18 +1199,7 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", + "crossbeam-utils", ] [[package]] @@ -1476,7 +1296,7 @@ version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", @@ -1796,7 +1616,7 @@ version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1862,9 +1682,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.150.4" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac97d833b861e32bc0a71d0542bf5c92094f9818c52d65c695227bfa95ffbe3" +checksum = "f76aa50bd291b43ad56fb7da3e63c4c3cecb3c7e19db76c8097856371bc0d84a" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1873,9 +1693,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.150.4" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee6aed60cf09cb6d0b954d74351acb9beb13daab0bacad279691f6b97504b7e6" +checksum = "e7d2db304df6b72141d45b140ec6df68ecd2300a7ab27de18b3e0e3af38c9776" dependencies = [ "serde_json", ] @@ -1896,7 +1716,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "home", "windows-sys 0.48.0", ] @@ -1989,27 +1809,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" dependencies = [ "byteorder", - "ff_derive_ce", "hex", "rand 0.4.6", "serde", ] -[[package]] -name = "ff_derive_ce" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20" -dependencies = [ - "num-bigint 0.4.5", - "num-integer", - "num-traits", - "proc-macro2 1.0.85", - "quote 1.0.36", - "serde", - "syn 1.0.109", -] - [[package]] name = "fiat-crypto" version = "0.2.9" @@ -2105,12 +1909,11 @@ dependencies = [ [[package]] name = "franklin-crypto" -version = "0.2.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05eab544ba915269919b5f158a061b540a4e3a04150c1346481f4f7b80eb6311" +checksum = "971289216ea5c91872e5e0bb6989214b537bbce375d09fabea5c3ccfe031b204" dependencies = [ "arr_macro", - "bellman_ce 0.8.0", "bit-vec", "blake2 0.9.2", "blake2-rfc_bellman_edition", @@ -2134,6 +1937,7 @@ dependencies = [ "smallvec", "splitmut", "tiny-keccak 1.5.0", + "zksync_bellman", ] [[package]] @@ -2300,7 +2104,7 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi", @@ -3186,7 +2990,7 @@ version = "0.11.6" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", "sha2 0.10.8", @@ -3198,7 +3002,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa 0.16.9", "elliptic-curve 0.13.8", "once_cell", @@ -3230,12 +3034,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" -[[package]] -name = "leb128" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" - [[package]] name = "libc" version = "0.2.155" @@ -3248,7 +3046,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-targets 0.52.5", ] @@ -3258,22 +3056,6 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" -[[package]] -name = "librocksdb-sys" -version = "0.11.0+8.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" -dependencies = [ - "bindgen 0.65.1", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", - "lz4-sys", - "zstd-sys", -] - [[package]] name = "libsqlite3-sys" version = "0.30.1" @@ -3285,17 +3067,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "libz-sys" -version = "1.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -3362,16 +3133,6 @@ dependencies = [ "logos-codegen", ] -[[package]] -name = "lz4-sys" -version = "1.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -3393,19 +3154,13 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "md-5" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "digest 0.10.7", ] @@ -3421,15 +3176,6 @@ version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] - 
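These lockfile hunks consolidate the previously duplicated `crossbeam` trees: the 0.7.x family (`crossbeam-channel` 0.4, `crossbeam-deque` 0.7, `crossbeam-epoch` 0.8.2, `crossbeam-queue` 0.2, `crossbeam-utils` 0.7) is removed along with its transitive-only helpers (`maybe-uninit`, `memoffset` 0.5, `cfg-if` 0.1), leaving single unversioned entries that every consumer shares. This works because the `crossbeam` facade crate (0.8.4 in this lockfile) re-exports its sub-crates; a minimal sketch of the facade in use, assuming `crossbeam` 0.8:

```rust
// One `crossbeam` dependency replaces the split 0.7/0.8 sub-crate versions;
// the facade re-exports crossbeam-channel under `crossbeam::channel`.
use crossbeam::channel::unbounded;

fn main() {
    let (tx, rx) = unbounded();
    tx.send(42).unwrap();
    assert_eq!(rx.recv().unwrap(), 42);
}
```

After a consolidation like this, `cargo tree --duplicates` is a quick way to confirm that no split versions of a crate remain in the dependency graph.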
[[package]] name = "miette" version = "5.10.0" @@ -3556,7 +3302,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "cfg_aliases", "libc", ] @@ -3800,12 +3546,12 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.64" +version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "libc", "once_cell", @@ -3832,9 +3578,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.102" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" dependencies = [ "cc", "libc", @@ -3978,19 +3724,6 @@ dependencies = [ "sha2 0.10.8", ] -[[package]] -name = "pairing_ce" -version = "0.28.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843b5b6fb63f00460f611dbc87a50bbbb745f0dfe5cbf67ca89299c79098640e" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - [[package]] name = "parity-scale-codec" version = "3.6.11" @@ -4039,7 +3772,7 @@ version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall 0.5.1", "smallvec", @@ -4663,8 +4396,8 @@ version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-deque 0.8.5", - "crossbeam-utils 0.8.20", + "crossbeam-deque", + "crossbeam-utils", ] [[package]] @@ -4851,9 +4584,9 @@ dependencies = [ [[package]] name = "rescue_poseidon" -version = "0.5.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27fbc6ba44baf99a0ca8387b1fa1cf90d3d7062860c1afedbbb64454829acc5" +checksum = "82900c877a0ba5362ac5756efbd82c5b795dc509011c1253e2389d8708f1389d" dependencies = [ "addchain", "arrayvec 0.7.4", @@ -4902,7 +4635,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", - "cfg-if 1.0.0", + "cfg-if", "getrandom", "libc", "spin", @@ -4949,16 +4682,6 @@ dependencies = [ "rustc-hex", ] -[[package]] -name = "rocksdb" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" -dependencies = [ - "libc", - "librocksdb-sys", -] - [[package]] name = "rsa" version = "0.9.6" @@ -5483,7 +5206,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5495,7 +5218,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -5507,7 +5230,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5518,7 +5241,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eca2daa77078f4ddff27e75c4bf59e4c2697525f56dbb3c842d34a5d1f2b04a2" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5572,9 +5295,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.4" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e5d862287bb883a4cb0bc4f8ea938ba3fdaa5e495f1a59bc3515231017a0e2" +checksum = "3f11e6942c89861aecb72261f8220800a1b69b8a5463c07c24df75b81fd809b0" dependencies = [ "bincode", "blake2 0.10.6", @@ -5664,9 +5387,9 @@ dependencies = [ [[package]] name = "snark_wrapper" -version = "0.1.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71aa5bffe5e7daca634bf2fedf0bf566273cb7eae01711d1aa6e5223d36d987d" +checksum = "0b5dfdc3eed51d79541adff827593743750fe6626a65006814f8cfa4273371de" dependencies = [ "derivative", "rand 0.4.6", @@ -5769,7 +5492,7 @@ dependencies = [ "bytes", "chrono", "crc", - "crossbeam-queue 0.3.11", + "crossbeam-queue", "either", "event-listener", "futures-channel", @@ -6135,7 +5858,7 @@ version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "rustix", "windows-sys 0.52.0", @@ -6213,7 +5936,7 @@ version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -6813,49 +6536,6 @@ dependencies = [ "syn 2.0.66", ] -[[package]] -name = "vk_setup_data_generator_server_fri" -version = "0.1.0" -dependencies = [ - "anyhow", - "bincode", - "circuit_definitions", - "clap 4.5.4", - "hex", - "indicatif", - "itertools 0.10.5", - "md5", - "once_cell", - "proptest", - "serde", - "serde_derive", - "serde_json", - "sha3 0.10.8", - "shivini", - "structopt", - "toml_edit 0.14.4", - "tracing", - "tracing-subscriber", - "zkevm_test_harness", - "zksync_config", - "zksync_env_config", - "zksync_prover_fri_types", - "zksync_types", - "zksync_utils", - "zksync_vlog", -] - -[[package]] -name = "vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=9a38900d7af9b1d72b47ce3be980e77c1239a61d#9a38900d7af9b1d72b47ce3be980e77c1239a61d" -dependencies = [ - "enum_dispatch", - "primitive-types", - "zk_evm_abstractions 0.150.4", - "zkevm_opcode_defs 0.150.4", -] - [[package]] name = "wait-timeout" version = "0.2.0" @@ -6902,7 +6582,7 @@ version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] @@ -6927,7 +6607,7 @@ version = "0.4.42" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -7220,7 +6900,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -7230,7 +6910,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -7357,9 +7037,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2dbb0ed38d61fbd04bd7575755924d1303e129c04c909abba7f5bfcc6260bcf" +checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" dependencies = [ "anyhow", "lazy_static", @@ -7367,7 +7047,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.4", + "zk_evm_abstractions 0.150.5", ] [[package]] @@ -7398,22 +7078,22 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31460aacfe65b39ac484a2a2e0bbb02baf141f65264bf48e1e4f59ab375fe933" +checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", ] [[package]] name = "zkevm-assembly" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b69d09d125b94767847c4cdc4ae399654b9e2a2f9304bd8935a7033bef4b07c" +checksum = "e99106038062537c05b4e6e7754d1bbba28ba16185a3e5ee5ad22e2f8be883bb" dependencies = [ "env_logger 0.9.3", "hex", @@ -7426,7 +7106,7 @@ dependencies = [ "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", ] [[package]] @@ -7475,13 +7155,12 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abdfaa95dfe0878fda219dd17a6cc8c28711e2067785910c0e06d3ffdca78629" +checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" dependencies = [ "arrayvec 0.7.4", "boojum", - "cs_derive", "derivative", "hex", "itertools 0.10.5", @@ -7490,7 +7169,8 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", + "zksync_cs_derive", ] [[package]] @@ -7537,9 +7217,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7c5c7b4481a646f8696b08cee64a8dec097509a6378d18242f81022f327f1e" +checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7554,15 +7234,15 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9416dc5fcf7bc403d4c24d37f0e9a492a81926ff0e89a7792dc8a29de69aec1b" +checksum = 
"550f82d3b7448c35168dc13bfadbccd5fd306097b6e1ea01793151c1c9137a36" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.150.5", "codegen", - "crossbeam 0.8.4", + "crossbeam", "derivative", "env_logger 0.9.3", "hex", @@ -7581,13 +7261,15 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.4" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fe099f4f4a2cc8ca8ca591d7619ac00b8054f63b712fa6ceee2b84c6e04c62" +checksum = "aecd7f624185b785e9d8457986ac34685d478e2baa78417d51b102b7d0fa27fd" dependencies = [ "bindgen 0.59.2", - "crossbeam 0.8.4", + "cmake", + "crossbeam", "derivative", + "era_cudart_sys", "futures 0.3.30", "futures-locks", "num_cpus", @@ -7595,13 +7277,13 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.4" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73d27e0e4589c7445f5a22e511cb5186e2d205172ca4b26acd7a334b3af9492" +checksum = "a089b11fcdbd37065acaf427545cb50b87e6712951a10f3761b3d370e4b8f9bc" dependencies = [ "bit-vec", - "cfg-if 1.0.0", - "crossbeam 0.8.4", + "cfg-if", + "crossbeam", "franklin-crypto", "itertools 0.10.5", "num_cpus", @@ -7612,9 +7294,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.150.4" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cf4c09adf0a84af0d7ded1fd85a2487fef4cbf1cfc1925412717d0eef03dd5a" +checksum = "dc764c21d4ae15c5bc2c07c14c814c5e3ba8d194ddcca543b8cec95456031832" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -7639,11 +7321,34 @@ dependencies = [ "url", ] +[[package]] +name = "zksync_bellman" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ffa03efe9bdb137a4b36b97d1a74237e18c9ae42b755163d903a9d48c1a5d80" +dependencies = [ + "arrayvec 0.7.4", + "bit-vec", + "blake2s_simd", + "byteorder", + "cfg-if", + "crossbeam", + "futures 0.3.30", + "hex", + "lazy_static", + "num_cpus", + "rand 0.4.6", + "serde", + "smallvec", + "tiny-keccak 1.5.0", + "zksync_pairing", +] + [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" dependencies = [ "anyhow", "once_cell", @@ -7677,9 +7382,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" +checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" dependencies = [ "anyhow", "blst", @@ -7690,7 +7395,6 @@ dependencies = [ "k256 0.13.3", "num-bigint 0.4.5", "num-traits", - "pairing_ce", "rand 0.4.6", "rand 0.8.5", "sha3 0.10.8", @@ -7701,9 +7405,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" +checksum = "0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" dependencies = [ "anyhow", "bit-vec", @@ -7723,9 +7427,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version 
= "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" +checksum = "7b9dbcb923fa201af03f49f70c11a923b416915d2ddf8b2de3a2e861f22898a4" dependencies = [ "anyhow", "async-trait", @@ -7743,9 +7447,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" dependencies = [ "anyhow", "rand 0.8.5", @@ -7775,9 +7479,7 @@ dependencies = [ "serde_yaml", "tokio", "zksync_config", - "zksync_dal", "zksync_env_config", - "zksync_node_genesis", "zksync_protobuf", "zksync_protobuf_config", ] @@ -7799,6 +7501,18 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_cs_derive" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5939e2df4288c263c706ff18ac718e984149223ad4289d6d957d767dcfc04c81" +dependencies = [ + "proc-macro-error", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 1.0.109", +] + [[package]] name = "zksync_dal" version = "0.1.0" @@ -7845,7 +7559,6 @@ dependencies = [ "tracing", "vise", "zksync_basic_types", - "zksync_health_check", ] [[package]] @@ -7887,24 +7600,38 @@ dependencies = [ ] [[package]] -name = "zksync_health_check" -version = "0.1.0" +name = "zksync_ff" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9524b06780b5e164e84b38840c7c428c739f051f35af6efc4d1285f629ceb88e" dependencies = [ - "async-trait", - "futures 0.3.30", + "byteorder", + "hex", + "rand 0.4.6", "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "vise", + "zksync_ff_derive", +] + +[[package]] +name = "zksync_ff_derive" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f91e58e75d65877f09f83bc3dca8f054847ae7ec4f3e64bfa610a557edd8e8e" +dependencies = [ + "num-bigint 0.4.5", + "num-integer", + "num-traits", + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde", + "syn 1.0.109", ] [[package]] name = "zksync_kzg" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9949f48ea1a9f9a0e73242d4d1e87e681095181827486b3fcc2cf93e5aa03280" +checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" dependencies = [ "boojum", "derivative", @@ -7914,26 +7641,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.4", -] - -[[package]] -name = "zksync_merkle_tree" -version = "0.1.0" -dependencies = [ - "anyhow", - "leb128", - "once_cell", - "rayon", - "thiserror", - "thread_local", - "tracing", - "vise", - "zksync_crypto_primitives", - "zksync_prover_interface", - "zksync_storage", - "zksync_types", - "zksync_utils", + "zkevm_circuits 0.150.5", ] [[package]] @@ -7950,11 +7658,11 @@ name = "zksync_multivm" version = "0.1.0" dependencies = [ "anyhow", - "circuit_sequencer_api 0.133.0", - "circuit_sequencer_api 0.140.0", - "circuit_sequencer_api 0.141.1", - "circuit_sequencer_api 0.142.0", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.133.1", + "circuit_sequencer_api 0.140.3", + "circuit_sequencer_api 0.141.2", + "circuit_sequencer_api 0.142.2", + "circuit_sequencer_api 0.150.5", "hex", "itertools 0.10.5", "once_cell", @@ -7962,40 +7670,19 @@ 
dependencies = [ "thiserror", "tracing", "vise", - "vm2", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.4", + "zk_evm 0.150.5", "zksync_contracts", "zksync_system_constants", "zksync_types", "zksync_utils", + "zksync_vm2", "zksync_vm_interface", ] -[[package]] -name = "zksync_node_genesis" -version = "0.1.0" -dependencies = [ - "anyhow", - "itertools 0.10.5", - "thiserror", - "tokio", - "tracing", - "vise", - "zksync_config", - "zksync_contracts", - "zksync_dal", - "zksync_eth_client", - "zksync_merkle_tree", - "zksync_multivm", - "zksync_system_constants", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_object_store" version = "0.1.0" @@ -8019,6 +7706,19 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_pairing" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8412ae5574472fa567a097e183f9a01974b99dd0b5da3bfa1bbe6c57c579aa2" +dependencies = [ + "byteorder", + "cfg-if", + "rand 0.4.6", + "serde", + "zksync_ff", +] + [[package]] name = "zksync_proof_fri_compressor" version = "0.1.0" @@ -8026,7 +7726,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.150.5", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -8037,7 +7737,6 @@ dependencies = [ "tokio", "tracing", "vise", - "vk_setup_data_generator_server_fri", "zkevm_test_harness", "zksync-wrapper-prover", "zksync_config", @@ -8047,6 +7746,7 @@ dependencies = [ "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_interface", + "zksync_prover_keystore", "zksync_queued_job_processor", "zksync_types", "zksync_utils", @@ -8055,9 +7755,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" dependencies = [ "anyhow", "bit-vec", @@ -8076,9 +7776,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" dependencies = [ "anyhow", "heck 0.5.0", @@ -8137,8 +7837,8 @@ dependencies = [ "shivini", "tokio", "tracing", + "tracing-subscriber", "vise", - "vk_setup_data_generator_server_fri", "zkevm_test_harness", "zksync_config", "zksync_core_leftovers", @@ -8147,6 +7847,7 @@ dependencies = [ "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_fri_utils", + "zksync_prover_keystore", "zksync_queued_job_processor", "zksync_types", "zksync_utils", @@ -8212,7 +7913,7 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.150.5", "serde", "serde_with", "strum", @@ -8241,27 +7942,36 @@ dependencies = [ ] [[package]] -name = "zksync_queued_job_processor" +name = "zksync_prover_keystore" version = "0.1.0" dependencies = [ "anyhow", - "async-trait", - "tokio", + "bincode", + "circuit_definitions", + "hex", + "md5", + "once_cell", + "serde", + "serde_json", + "sha3 0.10.8", + "shivini", "tracing", - "vise", + "zkevm_test_harness", + "zksync_basic_types", + "zksync_prover_fri_types", "zksync_utils", ] [[package]] -name = 
"zksync_storage" +name = "zksync_queued_job_processor" version = "0.1.0" dependencies = [ - "num_cpus", - "once_cell", - "rocksdb", - "thread_local", + "anyhow", + "async-trait", + "tokio", "tracing", "vise", + "zksync_utils", ] [[package]] @@ -8314,7 +8024,6 @@ dependencies = [ "bigdecimal", "futures 0.3.30", "hex", - "itertools 0.10.5", "num", "once_cell", "reqwest 0.12.5", @@ -8328,6 +8037,26 @@ dependencies = [ "zksync_vlog", ] +[[package]] +name = "zksync_vk_setup_data_generator_server_fri" +version = "0.1.0" +dependencies = [ + "anyhow", + "circuit_definitions", + "clap 4.5.4", + "indicatif", + "proptest", + "toml_edit 0.14.4", + "tracing", + "tracing-subscriber", + "zkevm_test_harness", + "zksync_prover_fri_types", + "zksync_prover_keystore", + "zksync_types", + "zksync_utils", + "zksync_vlog", +] + [[package]] name = "zksync_vlog" version = "0.1.0" @@ -8353,10 +8082,32 @@ dependencies = [ "vise-exporter", ] +[[package]] +name = "zksync_vm2" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +dependencies = [ + "enum_dispatch", + "primitive-types", + "zk_evm_abstractions 0.150.5", + "zkevm_opcode_defs 0.150.5", + "zksync_vm2_interface", +] + +[[package]] +name = "zksync_vm2_interface" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +dependencies = [ + "primitive-types", +] + [[package]] name = "zksync_vm_interface" version = "0.1.0" dependencies = [ + "anyhow", + "async-trait", "hex", "serde", "thiserror", @@ -8407,7 +8158,6 @@ dependencies = [ "tokio", "tracing", "vise", - "vk_setup_data_generator_server_fri", "zkevm_test_harness", "zksync_config", "zksync_core_leftovers", @@ -8418,6 +8168,7 @@ dependencies = [ "zksync_prover_fri_types", "zksync_prover_fri_utils", "zksync_prover_interface", + "zksync_prover_keystore", "zksync_queued_job_processor", "zksync_system_constants", "zksync_types", @@ -8437,7 +8188,6 @@ dependencies = [ "tokio", "tracing", "vise", - "vk_setup_data_generator_server_fri", "zksync_config", "zksync_core_leftovers", "zksync_env_config", @@ -8445,18 +8195,9 @@ dependencies = [ "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_fri_utils", + "zksync_prover_keystore", "zksync_queued_job_processor", "zksync_types", "zksync_utils", "zksync_vlog", ] - -[[package]] -name = "zstd-sys" -version = "2.0.10+zstd.1.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" -dependencies = [ - "cc", - "pkg-config", -] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 88b5b626704b..624661adc8dc 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -52,17 +52,17 @@ tempfile = "3" tokio = "1" toml_edit = "0.14.4" tracing = "0.1" -tracing-subscriber = { version = "0.3" } +tracing-subscriber = "0.3" vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.4" -circuit_sequencer_api = "=0.150.4" -zkevm_test_harness = "=0.150.4" +circuit_definitions = "=0.150.5" +circuit_sequencer_api = "=0.150.5" +zkevm_test_harness = "=0.150.5" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.4" } -shivini = "=0.150.4" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.7" } +shivini = "=0.150.7" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = 
"0.1.0" } @@ -87,7 +87,8 @@ zksync_periodic_job = { path = "../core/lib/periodic_job" } zksync_prover_dal = { path = "crates/lib/prover_dal" } zksync_prover_fri_types = { path = "crates/lib/prover_fri_types" } zksync_prover_fri_utils = { path = "crates/lib/prover_fri_utils" } -vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" } +zksync_prover_keystore = { path = "crates/lib/keystore" } +zksync_vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" } # for `perf` profiling [profile.perf] diff --git a/prover/crates/bin/proof_fri_compressor/Cargo.toml b/prover/crates/bin/proof_fri_compressor/Cargo.toml index a44244c97b57..936f0cb5100b 100644 --- a/prover/crates/bin/proof_fri_compressor/Cargo.toml +++ b/prover/crates/bin/proof_fri_compressor/Cargo.toml @@ -21,7 +21,7 @@ zksync_utils.workspace = true zksync_core_leftovers.workspace = true zksync_prover_fri_types.workspace = true zksync_queued_job_processor.workspace = true -vk_setup_data_generator_server_fri.workspace = true +zksync_prover_keystore.workspace = true zksync_vlog.workspace = true circuit_sequencer_api.workspace = true diff --git a/prover/crates/bin/proof_fri_compressor/src/compressor.rs b/prover/crates/bin/proof_fri_compressor/src/compressor.rs index 067114ca5a6c..e462097e38d0 100644 --- a/prover/crates/bin/proof_fri_compressor/src/compressor.rs +++ b/prover/crates/bin/proof_fri_compressor/src/compressor.rs @@ -23,9 +23,9 @@ use zksync_prover_fri_types::{ get_current_pod_name, AuxOutputWitnessWrapper, FriProofWrapper, }; use zksync_prover_interface::outputs::L1BatchProofForL1; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber}; -use zksync_vk_setup_data_server_fri::keystore::Keystore; use crate::metrics::METRICS; @@ -35,7 +35,7 @@ pub struct ProofCompressor { compression_mode: u8, max_attempts: u32, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, } impl ProofCompressor { @@ -45,7 +45,7 @@ impl ProofCompressor { compression_mode: u8, max_attempts: u32, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { blob_store, @@ -53,18 +53,16 @@ impl ProofCompressor { compression_mode, max_attempts, protocol_version, - setup_data_path, + keystore, } } #[tracing::instrument(skip(proof, _compression_mode))] pub fn compress_proof( - l1_batch: L1BatchNumber, proof: ZkSyncRecursionLayerProof, _compression_mode: u8, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let scheduler_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, @@ -172,16 +170,13 @@ impl JobProcessor for ProofCompressor { async fn process_job( &self, - job_id: &L1BatchNumber, + _job_id: &L1BatchNumber, job: ZkSyncRecursionLayerProof, _started_at: Instant, ) -> JoinHandle> { let compression_mode = self.compression_mode; - let block_number = *job_id; - let setup_data_path = self.setup_data_path.clone(); - tokio::task::spawn_blocking(move || { - Self::compress_proof(block_number, job, compression_mode, setup_data_path) - }) + let keystore = self.keystore.clone(); + tokio::task::spawn_blocking(move || Self::compress_proof(job, compression_mode, keystore)) } async fn save_result( diff --git 
a/prover/crates/bin/proof_fri_compressor/src/main.rs b/prover/crates/bin/proof_fri_compressor/src/main.rs index e2086b228b69..f06b4b8f89e5 100644 --- a/prover/crates/bin/proof_fri_compressor/src/main.rs +++ b/prover/crates/bin/proof_fri_compressor/src/main.rs @@ -11,6 +11,7 @@ use zksync_env_config::object_store::ProverObjectStoreConfig; use zksync_object_store::ObjectStoreFactory; use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; use zksync_utils::wait_for_tasks::ManagedTasks; use zksync_vlog::prometheus::PrometheusExporterConfig; @@ -70,16 +71,18 @@ async fn main() -> anyhow::Result<()> { let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; + let prover_config = general_config + .prover_config + .expect("ProverConfig doesn't exist"); + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); let proof_compressor = ProofCompressor::new( blob_store, pool, config.compression_mode, config.max_attempts, protocol_version, - general_config - .prover_config - .expect("ProverConfig doesn't exist") - .setup_data_path, + keystore, ); let (stop_sender, stop_receiver) = watch::channel(false); diff --git a/prover/crates/bin/prover_cli/README.md b/prover/crates/bin/prover_cli/README.md index 2d57e0b56495..e0dd1697bf6d 100644 --- a/prover/crates/bin/prover_cli/README.md +++ b/prover/crates/bin/prover_cli/README.md @@ -9,6 +9,12 @@ git clone git@github.com:matter-labs/zksync-era.git cargo install prover_cli ``` +Or + +``` +cargo +nightly-2024-08-01 install --git https://github.com/matter-labs/zksync-era/ --locked prover_cli --force +``` + ## Usage ``` diff --git a/prover/crates/bin/prover_cli/src/commands/insert_version.rs b/prover/crates/bin/prover_cli/src/commands/insert_version.rs index 7f30719a713b..e89d2024e26f 100644 --- a/prover/crates/bin/prover_cli/src/commands/insert_version.rs +++ b/prover/crates/bin/prover_cli/src/commands/insert_version.rs @@ -35,7 +35,7 @@ pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { let protocol_version_patch = VersionPatch(args.patch); - let snark_wrapper = H256::from_str(&args.snark_wrapper).unwrap_or_else(|_| { + let snark_wrapper_vk_hash = H256::from_str(&args.snark_wrapper).unwrap_or_else(|_| { panic!("Invalid snark wrapper hash"); }); @@ -43,7 +43,7 @@ pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { .save_prover_protocol_version( ProtocolSemanticVersion::new(protocol_version, protocol_version_patch), L1VerifierConfig { - recursion_scheduler_level_vk_hash: snark_wrapper, + snark_wrapper_vk_hash, }, ) .await; diff --git a/prover/crates/bin/prover_cli/src/commands/status/l1.rs b/prover/crates/bin/prover_cli/src/commands/status/l1.rs index 16cecc103828..4b403215e9c2 100644 --- a/prover/crates/bin/prover_cli/src/commands/status/l1.rs +++ b/prover/crates/bin/prover_cli/src/commands/status/l1.rs @@ -78,7 +78,7 @@ pub(crate) async fn run() -> anyhow::Result<()> { .await?; let node_l1_verifier_config = L1VerifierConfig { - recursion_scheduler_level_vk_hash: node_verification_key_hash, + snark_wrapper_vk_hash: node_verification_key_hash, }; let prover_connection_pool = ConnectionPool::::builder( @@ -149,7 +149,7 @@ fn pretty_print_l1_verifier_config( ) { print_hash_comparison( "Verifier key", - node_l1_verifier_config.recursion_scheduler_level_vk_hash, - 
db_l1_verifier_config.recursion_scheduler_level_vk_hash, + node_l1_verifier_config.snark_wrapper_vk_hash, + db_l1_verifier_config.snark_wrapper_vk_hash, ); } diff --git a/prover/crates/bin/prover_cli/src/config/mod.rs b/prover/crates/bin/prover_cli/src/config/mod.rs index 3d99f2be3b2c..b3df2e7d2c56 100644 --- a/prover/crates/bin/prover_cli/src/config/mod.rs +++ b/prover/crates/bin/prover_cli/src/config/mod.rs @@ -1,12 +1,12 @@ use std::{io::Write, path::PathBuf}; -use crate::helper::core_workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; pub fn get_envfile() -> anyhow::Result { if let Ok(envfile) = std::env::var("PLI__CONFIG") { return Ok(envfile.into()); } - Ok(core_workspace_dir_or_current_dir().join("etc/pliconfig")) + Ok(Workspace::locate().core().join("etc/pliconfig")) } pub fn load_envfile(path: impl AsRef) -> anyhow::Result<()> { diff --git a/prover/crates/bin/prover_cli/src/helper.rs b/prover/crates/bin/prover_cli/src/helper.rs index 352a789baed7..7fe0c990e4e0 100644 --- a/prover/crates/bin/prover_cli/src/helper.rs +++ b/prover/crates/bin/prover_cli/src/helper.rs @@ -1,10 +1,7 @@ -use std::{ - fs::File, - path::{Path, PathBuf}, -}; +use std::{fs::File, path::PathBuf}; use zksync_types::ethabi::Contract; -use zksync_utils::locate_workspace; +use zksync_utils::env::Workspace; const ZKSYNC_HYPERCHAIN_CONTRACT_FILE: &str = "contracts/l1-contracts/artifacts/contracts/state-transition/chain-interfaces/IZkSyncHyperchain.sol/IZkSyncHyperchain.json"; @@ -27,8 +24,7 @@ fn read_file_to_json_value(path: &PathBuf) -> serde_json::Value { } fn load_contract_if_present(path: &str) -> Contract { - let home = core_workspace_dir_or_current_dir(); - let path = Path::new(&home).join(path); + let path = Workspace::locate().core().join(path); path.exists() .then(|| { serde_json::from_value(read_file_to_json_value(&path)["abi"].take()).unwrap_or_else( @@ -39,9 +35,3 @@ fn load_contract_if_present(path: &str) -> Contract { panic!("Failed to load contract from {:?}", path); }) } - -pub fn core_workspace_dir_or_current_dir() -> PathBuf { - locate_workspace() - .map(|a| a.join("..")) - .unwrap_or_else(|| PathBuf::from(".")) -} diff --git a/prover/crates/bin/prover_fri/Cargo.toml b/prover/crates/bin/prover_fri/Cargo.toml index 0d2e92be0481..e41244cecbf7 100644 --- a/prover/crates/bin/prover_fri/Cargo.toml +++ b/prover/crates/bin/prover_fri/Cargo.toml @@ -22,14 +22,14 @@ zksync_prover_fri_utils.workspace = true zksync_core_leftovers.workspace = true zksync_prover_fri_types.workspace = true zksync_utils.workspace = true -vk_setup_data_generator_server_fri.workspace = true +zksync_prover_keystore.workspace = true shivini = { workspace = true, optional = true, features = [ "circuit_definitions", "zksync", ] } zkevm_test_harness.workspace = true -circuit_definitions = { workspace = true, features = [ "log_tracing" ] } +circuit_definitions = { workspace = true, features = ["log_tracing"] } anyhow.workspace = true tracing.workspace = true @@ -43,6 +43,9 @@ reqwest = { workspace = true, features = ["blocking"] } regex.workspace = true clap = { workspace = true, features = ["derive"] } +[dev-dependencies] +tracing-subscriber.workspace = true + [features] default = [] -gpu = ["shivini", "vk_setup_data_generator_server_fri/gpu"] +gpu = ["shivini", "zksync_prover_keystore/gpu"] diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index dc8594cbdc1b..cfd588c26662 100644 --- 
a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -5,10 +5,12 @@ pub mod gpu_prover { use anyhow::Context as _; use shivini::{ gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext, + ProverContextConfig, }; use tokio::task::JoinHandle; - use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; - use zksync_env_config::FromEnv; + use zksync_config::configs::{ + fri_prover::SetupLoadMode as SetupLoadModeConfig, FriProverConfig, + }; use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, ProverDal}; use zksync_prover_fri_types::{ @@ -29,12 +31,12 @@ pub mod gpu_prover { CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, }; use zksync_prover_fri_utils::region_fetcher::Zone; + use zksync_prover_keystore::{keystore::Keystore, GoldilocksGpuProverSetupData}; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ basic_fri_types::CircuitIdRoundTuple, protocol_version::ProtocolSemanticVersion, prover_dal::SocketAddress, }; - use zksync_vk_setup_data_server_fri::{keystore::Keystore, GoldilocksGpuProverSetupData}; use crate::{ metrics::METRICS, @@ -54,6 +56,7 @@ pub mod gpu_prover { #[allow(dead_code)] pub struct Prover { + keystore: Keystore, blob_store: Arc, public_blob_store: Option>, config: Arc, @@ -72,6 +75,7 @@ pub mod gpu_prover { impl Prover { #[allow(dead_code)] pub fn new( + keystore: Keystore, blob_store: Arc, public_blob_store: Option>, config: FriProverConfig, @@ -82,8 +86,17 @@ pub mod gpu_prover { address: SocketAddress, zone: Zone, protocol_version: ProtocolSemanticVersion, + max_allocation: Option, ) -> Self { + let prover_context = match max_allocation { + Some(max_allocation) => ProverContext::create_with_config( + ProverContextConfig::default().with_maximum_device_allocation(max_allocation), + ) + .expect("failed initializing gpu prover context"), + None => ProverContext::create().expect("failed initializing gpu prover context"), + }; Prover { + keystore, blob_store, public_blob_store, config: Arc::new(config), @@ -91,8 +104,7 @@ pub mod gpu_prover { setup_load_mode, circuit_ids_for_round_to_be_proven, witness_vector_queue, - prover_context: ProverContext::create() - .expect("failed initializing gpu prover context"), + prover_context, address, zone, protocol_version, @@ -112,9 +124,8 @@ pub mod gpu_prover { .clone(), SetupLoadMode::FromDisk => { let started_at = Instant::now(); - let keystore = - Keystore::new_with_setup_data_path(self.config.setup_data_path.clone()); - let artifact: GoldilocksGpuProverSetupData = keystore + let artifact: GoldilocksGpuProverSetupData = self + .keystore .load_gpu_setup_data_for_circuit_type(key.clone()) .context("load_gpu_setup_data_for_circuit_type()")?; @@ -173,8 +184,11 @@ pub mod gpu_prover { (), &worker, ) - .unwrap_or_else(|_| { - panic!("failed generating GPU proof for id: {}", prover_job.job_id) + .unwrap_or_else(|err| { + panic!( + "failed generating GPU proof for id: {}, error: {:?}", + prover_job.job_id, err + ) }); tracing::info!( "Successfully generated gpu proof for job {} took: {:?}", @@ -328,36 +342,84 @@ pub mod gpu_prover { } } - pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result { - Ok(match config.setup_load_mode { - zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk, - zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => { + 
#[tracing::instrument(skip_all, fields(setup_load_mode = ?setup_load_mode, specialized_group_id = %specialized_group_id))] + pub async fn load_setup_data_cache( + keystore: &Keystore, + setup_load_mode: SetupLoadModeConfig, + specialized_group_id: u8, + circuit_ids: &[CircuitIdRoundTuple], + ) -> anyhow::Result { + Ok(match setup_load_mode { + SetupLoadModeConfig::FromDisk => SetupLoadMode::FromDisk, + SetupLoadModeConfig::FromMemory => { + anyhow::ensure!( + !circuit_ids.is_empty(), + "Circuit IDs must be provided when using FromMemory mode" + ); let mut cache = HashMap::new(); tracing::info!( "Loading setup data cache for group {}", - &config.specialized_group_id + &specialized_group_id ); - let prover_setup_metadata_list = FriProverGroupConfig::from_env() - .context("FriProverGroupConfig::from_env()")? - .get_circuit_ids_for_group_id(config.specialized_group_id) - .context( - "At least one circuit should be configured for group when running in FromMemory mode", - )?; tracing::info!( "for group {} configured setup metadata are {:?}", - &config.specialized_group_id, - prover_setup_metadata_list + &specialized_group_id, + circuit_ids ); - let keystore = Keystore::new_with_setup_data_path(config.setup_data_path.clone()); - for prover_setup_metadata in prover_setup_metadata_list { - let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); - let setup_data = keystore - .load_gpu_setup_data_for_circuit_type(key.clone()) - .context("load_gpu_setup_data_for_circuit_type()")?; - cache.insert(key, Arc::new(setup_data)); + // Load each file in parallel. Note that FS access is not necessarily parallel, but + // deserialization is (and it's not insignificant, as setup keys are large). + // Note: `collect` is important, because iterators are lazy and otherwise we won't actually + // spawn threads. + let handles: Vec<_> = circuit_ids + .into_iter() + .map(|prover_setup_metadata| { + let keystore = keystore.clone(); + let prover_setup_metadata = prover_setup_metadata.clone(); + tokio::task::spawn_blocking(move || { + let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); + let setup_data = keystore + .load_gpu_setup_data_for_circuit_type(key.clone()) + .context("load_gpu_setup_data_for_circuit_type()")?; + anyhow::Ok((key, Arc::new(setup_data))) + }) + }) + .collect(); + for handle in futures::future::join_all(handles).await { + let (key, setup_data) = handle.context("Key loading future panicked")??; + cache.insert(key, setup_data); } SetupLoadMode::FromMemory(cache) } }) } + + #[cfg(test)] + mod tests { + use zksync_types::basic_fri_types::AggregationRound; + + use super::*; + + #[tokio::test] + async fn test_load_setup_data_cache() { + tracing_subscriber::fmt::try_init().ok(); + + let keystore = Keystore::locate(); + let mode = SetupLoadModeConfig::FromMemory; + let specialized_group_id = 0; + let ids: Vec<_> = AggregationRound::ALL_ROUNDS + .into_iter() + .flat_map(|r| r.circuit_ids()) + .collect(); + if !keystore.is_setup_data_present(&setup_metadata_to_setup_data_key(&ids[0])) { + // We don't want this test to fail on envs where setup keys are not present. 
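+ // Setup keys are large, per-environment artifacts; treat their absence as a skip rather than a failure.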
+ return; + } + + let start = Instant::now(); + let _cache = load_setup_data_cache(&keystore, mode, specialized_group_id, &ids) + .await + .expect("Unable to load keys"); + tracing::info!("Cache load time: {:?}", start.elapsed()); + } + } } diff --git a/prover/crates/bin/prover_fri/src/main.rs b/prover/crates/bin/prover_fri/src/main.rs index db813394c194..cbba8d0ddb4f 100644 --- a/prover/crates/bin/prover_fri/src/main.rs +++ b/prover/crates/bin/prover_fri/src/main.rs @@ -139,6 +139,7 @@ async fn main() -> anyhow::Result<()> { public_blob_store, pool, circuit_ids_for_round_to_be_proven, + opt.max_allocation, notify, ) .await @@ -178,8 +179,11 @@ async fn get_prover_tasks( public_blob_store: Option>, pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, + _max_allocation: Option, _init_notifier: Arc, ) -> anyhow::Result>>> { + use zksync_prover_keystore::keystore::Keystore; + use crate::prover_job_processor::{load_setup_data_cache, Prover}; let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; @@ -189,12 +193,15 @@ async fn get_prover_tasks( protocol_version ); + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); let setup_load_mode = - load_setup_data_cache(&prover_config).context("load_setup_data_cache()")?; + load_setup_data_cache(&keystore, &prover_config).context("load_setup_data_cache()")?; let prover = Prover::new( store_factory.create_store().await?, public_blob_store, prover_config, + keystore, pool, setup_load_mode, circuit_ids_for_round_to_be_proven, @@ -213,15 +220,25 @@ async fn get_prover_tasks( public_blob_store: Option>, pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, + max_allocation: Option, init_notifier: Arc, ) -> anyhow::Result>>> { use gpu_prover_job_processor::gpu_prover; use socket_listener::gpu_socket_listener; use tokio::sync::Mutex; use zksync_prover_fri_types::queue::FixedSizeQueue; + use zksync_prover_keystore::keystore::Keystore; - let setup_load_mode = - gpu_prover::load_setup_data_cache(&prover_config).context("load_setup_data_cache()")?; + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); + let setup_load_mode = gpu_prover::load_setup_data_cache( + &keystore, + prover_config.setup_load_mode, + prover_config.specialized_group_id, + &circuit_ids_for_round_to_be_proven, + ) + .await + .context("load_setup_data_cache()")?; let witness_vector_queue = FixedSizeQueue::new(prover_config.queue_capacity); let shared_witness_vector_queue = Arc::new(Mutex::new(witness_vector_queue)); let consumer = shared_witness_vector_queue.clone(); @@ -235,6 +252,7 @@ async fn get_prover_tasks( let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let prover = gpu_prover::Prover::new( + keystore, store_factory.create_store().await?, public_blob_store, prover_config.clone(), @@ -245,6 +263,7 @@ async fn get_prover_tasks( address.clone(), zone.clone(), protocol_version, + max_allocation, ); let producer = shared_witness_vector_queue.clone(); @@ -295,4 +314,6 @@ pub(crate) struct Cli { pub(crate) config_path: Option, #[arg(long)] pub(crate) secrets_path: Option, + #[arg(long)] + pub(crate) max_allocation: Option, } diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs index 2df1b626497f..bbfb1d5a8322 100644 --- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs @@ -20,11 +20,11 @@ use 
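The `FromMemory` rewrite above is the performance-relevant change here: instead of deserializing setup keys sequentially, it spawns one blocking task per circuit ID and joins them all, so the expensive deserialization runs in parallel (each task receives its own clone of the `Keystore` handle). A self-contained sketch of the same pattern, with hypothetical `load_one`/`load_all` helpers standing in for the real keystore API:

```rust
use std::{collections::HashMap, sync::Arc};

use anyhow::Context as _;

// Stand-in for the expensive, CPU-bound deserialization of one setup key.
fn load_one(id: u8) -> anyhow::Result<Vec<u8>> {
    Ok(vec![id; 1024])
}

async fn load_all(ids: Vec<u8>) -> anyhow::Result<HashMap<u8, Arc<Vec<u8>>>> {
    // The eager `collect` is what actually spawns the tasks; without it the
    // iterator stays lazy and each blocking load would only start when the
    // loop below polls it, serializing the work again.
    let handles: Vec<_> = ids
        .into_iter()
        .map(|id| {
            tokio::task::spawn_blocking(move || {
                let data = load_one(id)?;
                anyhow::Ok((id, Arc::new(data)))
            })
        })
        .collect();

    let mut cache = HashMap::new();
    for handle in futures::future::join_all(handles).await {
        let (id, data) = handle.context("loader task panicked")??;
        cache.insert(id, data);
    }
    Ok(cache)
}
```

As the in-code comment notes, file-system access may still serialize at the OS level; the win is parallel deserialization, which dominates for large setup keys. The diff also adds an optional `--max_allocation` CLI flag that flows into `ProverContextConfig::with_maximum_device_allocation`, capping how much GPU memory the prover context may claim.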
zksync_prover_fri_types::{ CircuitWrapper, FriProofWrapper, ProverJob, ProverServiceDataKey, }; use zksync_prover_fri_utils::fetch_next_circuit; +use zksync_prover_keystore::{keystore::Keystore, GoldilocksProverSetupData}; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ basic_fri_types::CircuitIdRoundTuple, protocol_version::ProtocolSemanticVersion, }; -use zksync_vk_setup_data_server_fri::{keystore::Keystore, GoldilocksProverSetupData}; use crate::{ metrics::{CircuitLabels, Layer, METRICS}, @@ -43,6 +43,7 @@ pub struct Prover { blob_store: Arc, public_blob_store: Option>, config: Arc, + keystore: Keystore, prover_connection_pool: ConnectionPool, setup_load_mode: SetupLoadMode, // Only pick jobs for the configured circuit id and aggregation rounds. @@ -52,11 +53,12 @@ impl Prover { - #[allow(dead_code)] + #[allow(dead_code, clippy::too_many_arguments)] pub fn new( blob_store: Arc, public_blob_store: Option>, config: FriProverConfig, + keystore: Keystore, prover_connection_pool: ConnectionPool, setup_load_mode: SetupLoadMode, circuit_ids_for_round_to_be_proven: Vec, @@ -66,6 +68,7 @@ blob_store, public_blob_store, config: Arc::new(config), + keystore, prover_connection_pool, setup_load_mode, circuit_ids_for_round_to_be_proven, @@ -85,9 +88,8 @@ .clone(), SetupLoadMode::FromDisk => { let started_at = Instant::now(); - let keystore = - Keystore::new_with_setup_data_path(self.config.setup_data_path.clone()); - let artifact: GoldilocksProverSetupData = keystore + let artifact: GoldilocksProverSetupData = self + .keystore .load_cpu_setup_data_for_circuit_type(key.clone()) .context("load_cpu_setup_data_for_circuit_type()")?; METRICS.gpu_setup_data_load_time[&key.circuit_id.to_string()] @@ -279,7 +281,10 @@ } #[allow(dead_code)] -pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result { +pub fn load_setup_data_cache( + keystore: &Keystore, + config: &FriProverConfig, +) -> anyhow::Result { Ok(match config.setup_load_mode { zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk, zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => { @@ -299,7 +304,6 @@ pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result ProverServiceDataKey { - ProverServiceDataKey { - circuit_id: setup_metadata.circuit_id, - round: setup_metadata.aggregation_round.into(), + let round = setup_metadata.aggregation_round.into(); + match round { + AggregationRound::NodeAggregation => { + // For node aggregation, only one key exists for all circuit types + ProverServiceDataKey { + circuit_id: ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + round, + } + } + _ => ProverServiceDataKey { + circuit_id: setup_metadata.circuit_id, + round, + }, } } diff --git a/prover/crates/bin/prover_fri/tests/basic_test.rs b/prover/crates/bin/prover_fri/tests/basic_test.rs index b6d6226e6967..1cf376bf8157 100644 --- a/prover/crates/bin/prover_fri/tests/basic_test.rs +++ b/prover/crates/bin/prover_fri/tests/basic_test.rs @@ -9,10 +9,10 @@ use zksync_prover_fri::prover_job_processor::Prover; use zksync_prover_fri_types::{ keys::FriCircuitKey, CircuitWrapper, ProverJob, ProverServiceDataKey, }; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; -use zksync_vk_setup_data_server_fri::{ keystore::Keystore, setup_data_generator::generate_setup_data_common, }; +use
zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; fn compare_serialized(expected: &T, actual: &T) { let serialized_expected = bincode::serialize(expected).unwrap(); @@ -57,7 +57,7 @@ async fn prover_and_assert_base_layer( CircuitWrapper::Base(base) => base.clone(), _ => anyhow::bail!("Expected base layer circuit"), }; - let keystore = Keystore::default(); + let keystore = Keystore::locate(); let circuit_setup_data = generate_setup_data_common( &keystore, ProverServiceDataKey::new_basic(circuit.numeric_circuit_type()), diff --git a/prover/crates/bin/prover_job_monitor/src/main.rs b/prover/crates/bin/prover_job_monitor/src/main.rs index e585c06ad779..734a4bac38a2 100644 --- a/prover/crates/bin/prover_job_monitor/src/main.rs +++ b/prover/crates/bin/prover_job_monitor/src/main.rs @@ -37,7 +37,6 @@ async fn main() -> anyhow::Result<()> { let general_config = load_general_config(opt.config_path).context("general config")?; - println!("general_config = {general_config:?}"); let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?; let observability_config = general_config diff --git a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs index c5eab586e7cf..5f507a753649 100644 --- a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs +++ b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs @@ -35,12 +35,15 @@ impl WitnessGeneratorQueueReporter { ); } - SERVER_METRICS.witness_generator_jobs_by_round - [&("queued", round.to_string(), protocol_version.to_string())] + SERVER_METRICS.witness_generator_jobs_by_round[&( + "queued", + format!("{:?}", round), + protocol_version.to_string(), + )] .set(stats.queued as u64); SERVER_METRICS.witness_generator_jobs_by_round[&( "in_progress", - round.to_string(), + format!("{:?}", round), protocol_version.to_string(), )] .set(stats.in_progress as u64); diff --git a/prover/crates/bin/prover_version/Cargo.toml b/prover/crates/bin/prover_version/Cargo.toml index 0275b4169b72..7ad602ec889e 100644 --- a/prover/crates/bin/prover_version/Cargo.toml +++ b/prover/crates/bin/prover_version/Cargo.toml @@ -1,7 +1,13 @@ [package] name = "prover_version" -version = "0.1.0" +version.workspace = true edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true [dependencies] zksync_prover_fri_types.workspace = true diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml b/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml index edae9764438f..4830f2277a79 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "vk_setup_data_generator_server_fri" +name = "zksync_vk_setup_data_generator_server_fri" version.workspace = true edition.workspace = true authors.workspace = true @@ -14,37 +14,21 @@ categories.workspace = true name = "key_generator" path = "src/main.rs" -[lib] -name = "zksync_vk_setup_data_server_fri" -path = "src/lib.rs" - [dependencies] zksync_vlog.workspace = true zksync_types.workspace = true -zksync_utils.workspace = true zksync_prover_fri_types.workspace = true +zksync_prover_keystore.workspace = true +zksync_utils.workspace 
= true zkevm_test_harness.workspace = true circuit_definitions = { workspace = true, features = ["log_tracing"] } -shivini = { workspace = true, optional = true } -zksync_config.workspace = true -zksync_env_config.workspace = true anyhow.workspace = true clap = { workspace = true, features = ["derive"] } tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter"] } -serde_json.workspace = true -serde = { workspace = true, features = ["derive"] } -serde_derive.workspace = true -itertools.workspace = true -bincode.workspace = true -structopt.workspace = true -once_cell.workspace = true toml_edit.workspace = true -md5.workspace = true -sha3.workspace = true -hex.workspace = true indicatif.workspace = true [dev-dependencies] @@ -52,4 +36,4 @@ proptest.workspace = true [features] default = [] -gpu = ["shivini"] +gpu = ["zksync_prover_keystore/gpu"] diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt b/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt deleted file mode 100644 index 7e50d86cb4f8..000000000000 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/proptest-regressions/tests.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Seeds for failure cases proptest has generated in the past. It is -# automatically read and these particular cases re-run before any -# novel cases are generated. -# -# It is recommended to check this file in to source control so that -# everyone who runs the test benefits from these saved cases. -cc ca181a7669a6e07b68bce71c8c723efcb8fd2a4e895fc962ca1d33ce5f8188f7 # shrinks to circuit_id = 1 -cc ce71957c410fa7af30e04b3e85423555a8e1bbd26b4682b748fa67162bc5687f # shrinks to circuit_id = 1 -cc 6d3b0c60d8a5e7d7dc3bb4a2a21cce97461827583ae01b2414345175a02a1221 # shrinks to key = ProverServiceDataKey { circuit_id: 1, round: BasicCircuits } diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs index fe4d5b2482a4..f92be40fd7cc 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs @@ -1,13 +1,15 @@ use anyhow::Context; -use zksync_vk_setup_data_server_fri::{ - commitment_utils::generate_commitments, - keystore::Keystore, - vk_commitment_helper::{get_toml_formatted_value, read_contract_toml, write_contract_toml}, +use zksync_prover_keystore::keystore::Keystore; + +use crate::vk_commitment_helper::{ + get_toml_formatted_value, read_contract_toml, write_contract_toml, }; pub fn read_and_update_contract_toml(keystore: &Keystore, dryrun: bool) -> anyhow::Result<()> { let mut contract_doc = read_contract_toml().context("read_contract_toml()")?; - let vk_commitments = generate_commitments(keystore).context("generate_commitments()")?; + let vk_commitments = keystore + .generate_commitments() + .context("generate_commitments()")?; contract_doc["contracts"]["FRI_RECURSION_LEAF_LEVEL_VK_HASH"] = get_toml_formatted_value(vk_commitments.leaf); @@ -32,6 +34,6 @@ mod test { #[test] fn test_read_and_update_contract_toml() { - read_and_update_contract_toml(&Keystore::default(), true).unwrap(); + read_and_update_contract_toml(&Keystore::locate(), true).unwrap(); } } diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_utils.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_utils.rs deleted file 
mode 100644 index 792efba35adc..000000000000 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_utils.rs +++ /dev/null @@ -1,97 +0,0 @@ -use std::{str::FromStr, sync::Mutex}; - -use anyhow::Context as _; -use hex::ToHex; -use once_cell::sync::Lazy; -use zkevm_test_harness::witness::recursive_aggregation::{ - compute_leaf_vks_and_params_commitment, compute_node_vk_commitment, -}; -use zksync_prover_fri_types::circuit_definitions::{ - boojum::field::goldilocks::GoldilocksField, - circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType, -}; -use zksync_types::{protocol_version::L1VerifierConfig, H256}; - -use crate::{ - keystore::Keystore, - utils::{calculate_snark_vk_hash, get_leaf_vk_params}, - VkCommitments, -}; - -static KEYSTORE: Lazy>> = Lazy::new(|| Mutex::new(None)); - -fn circuit_commitments(keystore: &Keystore) -> anyhow::Result { - let commitments = generate_commitments(keystore).context("generate_commitments()")?; - Ok(L1VerifierConfig { - // Instead of loading the FRI scheduler verification key here, - // we load the SNARK-wrapper verification key. - // This is due to the fact that these keys are used only for picking the - // prover jobs / witgen jobs from the DB. The keys are matched with the ones in - // `prover_fri_protocol_versions` table, which has the SNARK-wrapper verification key. - // This is OK because if the FRI VK changes, the SNARK-wrapper VK will change as well. - recursion_scheduler_level_vk_hash: H256::from_str(&commitments.snark_wrapper) - .context("invalid SNARK wrapper VK")?, - }) -} - -pub fn generate_commitments(keystore: &Keystore) -> anyhow::Result { - let leaf_vk_params = get_leaf_vk_params(keystore).context("get_leaf_vk_params()")?; - let leaf_layer_params = leaf_vk_params - .iter() - .map(|el| el.1.clone()) - .collect::>() - .try_into() - .unwrap(); - let leaf_vk_commitment = compute_leaf_vks_and_params_commitment(leaf_layer_params); - - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type(NodeLayerCircuit)")?; - let node_vk_commitment = compute_node_vk_commitment(node_vk.clone()); - - let scheduler_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type(SchedulerCircuit)")?; - let scheduler_vk_commitment = compute_node_vk_commitment(scheduler_vk.clone()); - - let hex_concatenator = |hex_array: [GoldilocksField; 4]| { - "0x".to_owned() - + &hex_array - .iter() - .map(|x| format!("{:016x}", x.0)) - .collect::>() - .join("") - }; - - let leaf_aggregation_commitment_hex = hex_concatenator(leaf_vk_commitment); - let node_aggregation_commitment_hex = hex_concatenator(node_vk_commitment); - let scheduler_commitment_hex = hex_concatenator(scheduler_vk_commitment); - let snark_vk_hash: String = calculate_snark_vk_hash(keystore)?.encode_hex(); - - let result = VkCommitments { - leaf: leaf_aggregation_commitment_hex, - node: node_aggregation_commitment_hex, - scheduler: scheduler_commitment_hex, - snark_wrapper: format!("0x{}", snark_vk_hash), - }; - tracing::info!("Commitments: {:?}", result); - Ok(result) -} - -pub fn get_cached_commitments(setup_data_path: Option) -> L1VerifierConfig { - if let Some(setup_data_path) = setup_data_path { - let keystore = Keystore::new_with_setup_data_path(setup_data_path); - let mut keystore_lock = KEYSTORE.lock().unwrap(); - *keystore_lock = 
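
The deleted `commitment_utils.rs` above rendered each commitment as `0x` followed by four 16-digit hex limbs. A self-contained sketch of that concatenation over plain `u64`s; the real `hex_concatenator` applies the same `{:016x}` formatting to the inner `u64` of each `GoldilocksField` element:

// Four 64-bit limbs rendered as one 0x-prefixed, 64-hex-digit string,
// mirroring `hex_concatenator` in the removed file.
fn hex_concat(limbs: [u64; 4]) -> String {
    let body: String = limbs.iter().map(|x| format!("{x:016x}")).collect();
    format!("0x{body}")
}

fn main() {
    assert_eq!(
        hex_concat([1, 2, 3, 4]),
        "0x0000000000000001000000000000000200000000000000030000000000000004"
    );
}
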
Some(keystore); - } - - let keystore = KEYSTORE.lock().unwrap().clone().unwrap_or_default(); - let commitments = circuit_commitments(&keystore).unwrap(); - - tracing::info!("Using cached commitments {:?}", commitments); - commitments -} diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs index da86f931b1c2..59d989037c4b 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs @@ -1,7 +1,7 @@ //! Tool to generate different types of keys used by the proving system. //! //! It can generate verification keys, setup keys, and also commitments. -use std::collections::HashMap; +use std::{collections::HashMap, path::PathBuf}; use anyhow::Context as _; use clap::{Parser, Subcommand}; @@ -23,13 +23,13 @@ use zksync_prover_fri_types::{ circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType, ProverServiceDataKey, }; -use zksync_vk_setup_data_server_fri::{ - commitment_utils::generate_commitments, +use zksync_prover_keystore::{ keystore::Keystore, setup_data_generator::{CPUSetupDataGenerator, GPUSetupDataGenerator, SetupDataGenerator}, }; mod commitment_generator; +mod vk_commitment_helper; #[cfg(test)] mod tests; @@ -97,7 +97,8 @@ fn generate_vks(keystore: &Keystore, jobs: usize, quiet: bool) -> anyhow::Result } // Let's also update the commitments file. - keystore.save_commitments(&generate_commitments(keystore)?) + let commitments = keystore.generate_commitments()?; + keystore.save_commitments(&commitments) } #[derive(Debug, Parser)] @@ -195,14 +196,14 @@ fn print_stats(digests: HashMap) -> anyhow::Result<()> { Ok(()) } -fn keystore_from_optional_path(path: Option, setup_path: Option) -> Keystore { +fn keystore_from_optional_path(path: Option, setup_data_path: Option) -> Keystore { if let Some(path) = path { - return Keystore::new_with_optional_setup_path(path.into(), setup_path); + return Keystore::new(path.into()).with_setup_path(setup_data_path.map(PathBuf::from)); } - if setup_path.is_some() { + if setup_data_path.is_some() { panic!("--setup_path must not be set when --path is not set"); } - Keystore::default() + Keystore::locate() } fn generate_setup_keys( diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs index 39b5f7a44fb8..0a9548197fd7 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs @@ -6,8 +6,8 @@ use zksync_prover_fri_types::{ }, ProverServiceDataKey, }; +use zksync_prover_keystore::keystore::Keystore; use zksync_types::basic_fri_types::AggregationRound; -use zksync_vk_setup_data_server_fri::keystore::Keystore; fn all_possible_prover_service_data_key() -> impl Strategy { let mut keys = Vec::with_capacity(30); @@ -36,21 +36,21 @@ fn all_possible_prover_service_data_key() -> impl Strategy Item { let mut value = Value::from(string_value); @@ -24,5 +23,7 @@ pub fn read_contract_toml() -> anyhow::Result { } pub fn get_contract_toml_path() -> PathBuf { - core_workspace_dir_or_current_dir().join("etc/env/base/contracts.toml") + Workspace::locate() + .core() + .join("etc/env/base/contracts.toml") } diff --git a/prover/crates/bin/witness_generator/Cargo.toml b/prover/crates/bin/witness_generator/Cargo.toml index cffb55906065..bb6a44e7eb33 100644 --- 
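
`keystore_from_optional_path` above now builds the keystore with `Keystore::new(path).with_setup_path(..)` when an explicit path is given, and falls back to `Keystore::locate()` otherwise. A self-contained analogue of that builder shape; the struct fields and the path returned by `locate()` are illustrative stand-ins, not the real `zksync_prover_keystore` lookup logic:

use std::path::PathBuf;

#[derive(Clone, Debug)]
struct Keystore {
    base_path: PathBuf,
    setup_path: Option<PathBuf>,
}

impl Keystore {
    fn new(base_path: PathBuf) -> Self {
        Self { base_path, setup_path: None }
    }

    // Stand-in for the real lookup, which resolves keys relative to the
    // workspace (e.g. the prover/data directory).
    fn locate() -> Self {
        Self::new(PathBuf::from("prover/data/keys"))
    }

    fn with_setup_path(mut self, setup_path: Option<PathBuf>) -> Self {
        self.setup_path = setup_path;
        self
    }
}

// Mirrors the control flow of keystore_from_optional_path in the diff.
fn keystore_from_optional_path(path: Option<String>, setup_data_path: Option<String>) -> Keystore {
    if let Some(path) = path {
        return Keystore::new(path.into()).with_setup_path(setup_data_path.map(PathBuf::from));
    }
    if setup_data_path.is_some() {
        panic!("--setup_path must not be set when --path is not set");
    }
    Keystore::locate()
}

fn main() {
    let keystore = keystore_from_optional_path(Some("keys".into()), Some("setup".into()));
    println!("{keystore:?}");
}
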
a/prover/crates/bin/witness_generator/Cargo.toml +++ b/prover/crates/bin/witness_generator/Cargo.toml @@ -22,7 +22,7 @@ zksync_multivm.workspace = true zksync_object_store.workspace = true zksync_types.workspace = true zksync_utils.workspace = true -vk_setup_data_generator_server_fri.workspace = true +zksync_prover_keystore.workspace = true zksync_prover_fri_types.workspace = true zksync_prover_fri_utils.workspace = true zksync_core_leftovers.workspace = true diff --git a/prover/crates/bin/witness_generator/src/artifacts.rs b/prover/crates/bin/witness_generator/src/artifacts.rs new file mode 100644 index 000000000000..f509d3b2f64a --- /dev/null +++ b/prover/crates/bin/witness_generator/src/artifacts.rs @@ -0,0 +1,50 @@ +use std::time::Instant; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover}; + +#[derive(Debug)] +pub(crate) struct AggregationBlobUrls { + pub aggregations_urls: String, + pub circuit_ids_and_urls: Vec<(u8, String)>, +} + +#[derive(Debug)] +pub(crate) struct SchedulerBlobUrls { + pub circuit_ids_and_urls: Vec<(u8, String)>, + pub closed_form_inputs_and_urls: Vec<(u8, String, usize)>, + pub scheduler_witness_url: String, +} + +pub(crate) enum BlobUrls { + Url(String), + Aggregation(AggregationBlobUrls), + Scheduler(SchedulerBlobUrls), +} + +#[async_trait] +pub(crate) trait ArtifactsManager { + type InputMetadata; + type InputArtifacts; + type OutputArtifacts; + + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result; + + async fn save_artifacts( + job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls; + + async fn update_database( + connection_pool: &ConnectionPool, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()>; +} diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs b/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs new file mode 100644 index 000000000000..3447659f8296 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs @@ -0,0 +1,108 @@ +use std::time::Instant; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::AuxOutputWitnessWrapper; +use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::{ArtifactsManager, BlobUrls}, + basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, + utils::SchedulerPartialInputWrapper, +}; + +#[async_trait] +impl ArtifactsManager for BasicWitnessGenerator { + type InputMetadata = L1BatchNumber; + type InputArtifacts = BasicWitnessGeneratorJob; + type OutputArtifacts = BasicCircuitArtifacts; + + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result { + let l1_batch_number = *metadata; + let data = object_store.get(l1_batch_number).await.unwrap(); + Ok(BasicWitnessGeneratorJob { + block_number: l1_batch_number, + data, + }) + } + + async fn save_artifacts( + job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let aux_output_witness_wrapper = AuxOutputWitnessWrapper(artifacts.aux_output_witness); + object_store + .put(L1BatchNumber(job_id), 
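
The new `artifacts.rs` above centralizes per-round blob handling behind a single trait with three associated types. A minimal synchronous analogue of that pattern; the in-memory store and every type here are hypothetical stand-ins, while the real trait is async and works against `ObjectStore` and the prover DAL:

use std::collections::HashMap;

// In-memory stand-in for the object store used by the real trait.
type Store = HashMap<u32, String>;

// Synchronous analogue of ArtifactsManager: each witness-generator round
// picks its own metadata/input/output types and gets a uniform
// fetch/persist interface.
trait ArtifactsManager {
    type InputMetadata;
    type InputArtifacts;
    type OutputArtifacts;

    fn get_artifacts(meta: &Self::InputMetadata, store: &Store) -> Option<Self::InputArtifacts>;
    fn save_artifacts(job_id: u32, out: Self::OutputArtifacts, store: &mut Store) -> String;
}

struct BasicRound;

impl ArtifactsManager for BasicRound {
    type InputMetadata = u32;      // stand-in for L1BatchNumber
    type InputArtifacts = String;  // stand-in for the witness input blob
    type OutputArtifacts = String; // stand-in for circuit artifacts

    fn get_artifacts(meta: &u32, store: &Store) -> Option<String> {
        store.get(meta).cloned()
    }

    fn save_artifacts(job_id: u32, out: String, store: &mut Store) -> String {
        store.insert(job_id, out);
        format!("blob://{job_id}") // stand-in for the returned blob URL
    }
}

fn main() {
    let mut store = Store::new();
    store.insert(1, "witness input".to_string());
    let input = BasicRound::get_artifacts(&1, &store).expect("missing artifacts");
    let url = BasicRound::save_artifacts(1, input.to_uppercase(), &mut store);
    println!("saved artifacts at {url}");
}
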
&aux_output_witness_wrapper) + .await + .unwrap(); + let wrapper = SchedulerPartialInputWrapper(artifacts.scheduler_witness); + let url = object_store + .put(L1BatchNumber(job_id), &wrapper) + .await + .unwrap(); + + BlobUrls::Url(url) + } + + #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] + async fn update_database( + connection_pool: &ConnectionPool, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + _artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + let blob_urls = match blob_urls { + BlobUrls::Scheduler(blobs) => blobs, + _ => unreachable!(), + }; + + let mut connection = connection_pool + .connection() + .await + .expect("failed to get database connection"); + let mut transaction = connection + .start_transaction() + .await + .expect("failed to get database transaction"); + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(L1BatchNumber(job_id)) + .await; + transaction + .fri_prover_jobs_dal() + .insert_prover_jobs( + L1BatchNumber(job_id), + blob_urls.circuit_ids_and_urls, + AggregationRound::BasicCircuits, + 0, + protocol_version_id, + ) + .await; + transaction + .fri_witness_generator_dal() + .create_aggregation_jobs( + L1BatchNumber(job_id), + &blob_urls.closed_form_inputs_and_urls, + &blob_urls.scheduler_witness_url, + get_recursive_layer_circuit_id_for_base_layer, + protocol_version_id, + ) + .await; + transaction + .fri_witness_generator_dal() + .mark_witness_job_as_successful(L1BatchNumber(job_id), started_at.elapsed()) + .await; + transaction + .commit() + .await + .expect("failed to commit database transaction"); + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs new file mode 100644 index 000000000000..08732689e3a6 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs @@ -0,0 +1,153 @@ +use std::{sync::Arc, time::Instant}; + +use anyhow::Context as _; +use tracing::Instrument; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::{get_current_pod_name, AuxOutputWitnessWrapper}; +use zksync_queued_job_processor::{async_trait, JobProcessor}; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::{ArtifactsManager, BlobUrls, SchedulerBlobUrls}, + basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, + metrics::WITNESS_GENERATOR_METRICS, +}; + +#[async_trait] +impl JobProcessor for BasicWitnessGenerator { + type Job = BasicWitnessGeneratorJob; + type JobId = L1BatchNumber; + // The artifact is optional to support skipping blocks when sampling is enabled. 
+ type JobArtifacts = Option; + + const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result> { + let mut prover_connection = self.prover_connection_pool.connection().await?; + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + let pod_name = get_current_pod_name(); + match prover_connection + .fri_witness_generator_dal() + .get_next_basic_circuit_witness_job( + last_l1_batch_to_process, + self.protocol_version, + &pod_name, + ) + .await + { + Some(block_number) => { + tracing::info!( + "Processing FRI basic witness-gen for block {}", + block_number + ); + let started_at = Instant::now(); + let job = Self::get_artifacts(&block_number, &*self.object_store).await?; + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + + Ok(Some((block_number, job))) + } + None => Ok(None), + } + } + + async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_witness_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: BasicWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle>> { + let object_store = Arc::clone(&self.object_store); + let max_circuits_in_flight = self.config.max_circuits_in_flight; + tokio::spawn(async move { + let block_number = job.block_number; + Ok( + Self::process_job_impl(object_store, job, started_at, max_circuits_in_flight) + .instrument(tracing::info_span!("basic_circuit", %block_number)) + .await, + ) + }) + } + + #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] + async fn save_result( + &self, + job_id: L1BatchNumber, + started_at: Instant, + optional_artifacts: Option, + ) -> anyhow::Result<()> { + match optional_artifacts { + None => Ok(()), + Some(artifacts) => { + let blob_started_at = Instant::now(); + let circuit_urls = artifacts.circuit_urls.clone(); + let queue_urls = artifacts.queue_urls.clone(); + + let aux_output_witness_wrapper = + AuxOutputWitnessWrapper(artifacts.aux_output_witness.clone()); + if self.config.shall_save_to_public_bucket { + self.public_blob_store.as_deref() + .expect("public_object_store shall not be empty while running with shall_save_to_public_bucket config") + .put(job_id, &aux_output_witness_wrapper) + .await + .unwrap(); + } + + let scheduler_witness_url = + match Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store) + .await + { + BlobUrls::Url(url) => url, + _ => unreachable!(), + }; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()] + .observe(blob_started_at.elapsed()); + + Self::update_database( + &self.prover_connection_pool, + job_id.0, + started_at, + BlobUrls::Scheduler(SchedulerBlobUrls { + circuit_ids_and_urls: circuit_urls, + closed_form_inputs_and_urls: queue_urls, + scheduler_witness_url, + }), + artifacts, + ) + .await?; + Ok(()) + } + } + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for BasicWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_basic_circuit_witness_job_attempts(*job_id) + .await + 
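
The `JobProcessor` impl above follows the usual poll/spawn/persist loop: `get_next_job` fetches work, `process_job` runs it on a spawned task, and `save_result` persists the artifacts. A toy tokio-based sketch of that flow; the queue and artifact types are illustrative, and the real trait additionally tracks attempts, failures, and stop signals:

use std::time::Instant;

// Toy analogue of the poll -> spawn -> persist flow in job_processor.rs.
async fn run_one(job_id: u32, data: String) -> anyhow::Result<()> {
    let started_at = Instant::now();
    // process_job: heavy work runs on a spawned task, like the real impl.
    let handle = tokio::spawn(async move { format!("artifacts for {data}") });
    let artifacts = handle.await?;
    // save_result: persist artifacts and record timing metrics.
    println!("job {job_id} done in {:?}: {artifacts}", started_at.elapsed());
    Ok(())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // get_next_job: here, just a fixed in-memory queue.
    for (job_id, data) in [(1, "batch 1"), (2, "batch 2")] {
        run_one(job_id, data.to_string()).await?;
    }
    Ok(())
}
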
.map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for BasicWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/basic_circuits.rs b/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs similarity index 63% rename from prover/crates/bin/witness_generator/src/basic_circuits.rs rename to prover/crates/bin/witness_generator/src/basic_circuits/mod.rs index 00a4d99ba9a9..c9755c333dad 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits.rs +++ b/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs @@ -1,49 +1,43 @@ use std::{ - collections::{hash_map::DefaultHasher, HashSet}, - hash::{Hash, Hasher}, + collections::HashSet, + hash::{DefaultHasher, Hash, Hasher}, sync::Arc, time::Instant, }; -use anyhow::Context as _; -use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage}, encodings::recursion_request::RecursionQueueSimulator, - zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness, + zkevm_circuits::{ + fsm_input_output::ClosedFormInputCompactFormWitness, + scheduler::{ + block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, + }, + }, }; use tokio::sync::Semaphore; use tracing::Instrument; -use zkevm_test_harness::{ - geometry_config::get_geometry_config, witness::oracle::WitnessGenerationArtifact, -}; +use zkevm_test_harness::witness::oracle::WitnessGenerationArtifact; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_multivm::{ - interface::storage::StorageView, - vm_latest::{constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle}, -}; -use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; -use zksync_prover_fri_types::{ - circuit_definitions::{ + circuit_sequencer_api_latest::{ boojum::{ field::goldilocks::{GoldilocksExt2, GoldilocksField}, gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, }, - zkevm_circuits::scheduler::{ - block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, - }, + geometry_config::get_geometry_config, }, - get_current_pod_name, - keys::ClosedFormInputKey, - AuxOutputWitnessWrapper, CircuitAuxData, + interface::storage::StorageView, + vm_latest::{constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle}, + zk_evm_latest::ethereum_types::Address, }; -use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData}; use zksync_prover_interface::inputs::WitnessInputData; -use zksync_queued_job_processor::JobProcessor; +use zksync_system_constants::BOOTLOADER_ADDRESS; use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, Address, - L1BatchNumber, BOOTLOADER_ADDRESS, + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; use crate::{ @@ -52,33 +46,30 @@ use crate::{ storage_oracle::StorageOracle, utils::{ expand_bootloader_contents, save_circuit, save_ram_premutation_queue_witness, - ClosedFormInputWrapper, SchedulerPartialInputWrapper, KZG_TRUSTED_SETUP_FILE, + ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE, }, witness::WitnessStorage, }; +mod artifacts; +pub mod job_processor; + +#[derive(Clone)] pub struct BasicCircuitArtifacts { - circuit_urls: 
Vec<(u8, String)>, - queue_urls: Vec<(u8, String, usize)>, - scheduler_witness: SchedulerCircuitInstanceWitness< + pub(super) circuit_urls: Vec<(u8, String)>, + pub(super) queue_urls: Vec<(u8, String, usize)>, + pub(super) scheduler_witness: SchedulerCircuitInstanceWitness< GoldilocksField, CircuitGoldilocksPoseidon2Sponge, GoldilocksExt2, >, - aux_output_witness: BlockAuxilaryOutputWitness, -} - -#[derive(Debug)] -struct BlobUrls { - circuit_ids_and_urls: Vec<(u8, String)>, - closed_form_inputs_and_urls: Vec<(u8, String, usize)>, - scheduler_witness_url: String, + pub(super) aux_output_witness: BlockAuxilaryOutputWitness, } #[derive(Clone)] pub struct BasicWitnessGeneratorJob { - block_number: L1BatchNumber, - job: WitnessInputData, + pub(super) block_number: L1BatchNumber, + pub(super) data: WitnessInputData, } #[derive(Debug)] @@ -90,6 +81,17 @@ pub struct BasicWitnessGenerator { protocol_version: ProtocolSemanticVersion, } +type Witness = ( + Vec<(u8, String)>, + Vec<(u8, String, usize)>, + SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + BlockAuxilaryOutputWitness, +); + impl BasicWitnessGenerator { pub fn new( config: FriWitnessGeneratorConfig, @@ -113,7 +115,10 @@ impl BasicWitnessGenerator { started_at: Instant, max_circuits_in_flight: usize, ) -> Option { - let BasicWitnessGeneratorJob { block_number, job } = basic_job; + let BasicWitnessGeneratorJob { + block_number, + data: job, + } = basic_job; tracing::info!( "Starting witness generation of type {:?} for block {}", @@ -134,135 +139,8 @@ impl BasicWitnessGenerator { } } -#[async_trait] -impl JobProcessor for BasicWitnessGenerator { - type Job = BasicWitnessGeneratorJob; - type JobId = L1BatchNumber; - // The artifact is optional to support skipping blocks when sampling is enabled. 
- type JobArtifacts = Option; - - const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - let pod_name = get_current_pod_name(); - match prover_connection - .fri_witness_generator_dal() - .get_next_basic_circuit_witness_job( - last_l1_batch_to_process, - self.protocol_version, - &pod_name, - ) - .await - { - Some(block_number) => { - tracing::info!( - "Processing FRI basic witness-gen for block {}", - block_number - ); - let started_at = Instant::now(); - let job = get_artifacts(block_number, &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - - Ok(Some((block_number, job))) - } - None => Ok(None), - } - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_witness_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: BasicWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle>> { - let object_store = Arc::clone(&self.object_store); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - let block_number = job.block_number; - Ok( - Self::process_job_impl(object_store, job, started_at, max_circuits_in_flight) - .instrument(tracing::info_span!("basic_circuit", %block_number)) - .await, - ) - }) - } - - #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - optional_artifacts: Option, - ) -> anyhow::Result<()> { - match optional_artifacts { - None => Ok(()), - Some(artifacts) => { - let blob_started_at = Instant::now(); - let scheduler_witness_url = save_scheduler_artifacts( - job_id, - artifacts.scheduler_witness, - artifacts.aux_output_witness, - &*self.object_store, - self.public_blob_store.as_deref(), - self.config.shall_save_to_public_bucket, - ) - .await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()] - .observe(blob_started_at.elapsed()); - - update_database( - &self.prover_connection_pool, - started_at, - job_id, - BlobUrls { - circuit_ids_and_urls: artifacts.circuit_urls, - closed_form_inputs_and_urls: artifacts.queue_urls, - scheduler_witness_url, - }, - ) - .await; - Ok(()) - } - } - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for BasicWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_basic_circuit_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for BasicWitnessGenerator") - } -} - #[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -async fn process_basic_circuits_job( +pub(super) async fn process_basic_circuits_job( object_store: Arc, started_at: Instant, block_number: L1BatchNumber, @@ -287,93 +165,6 @@ async fn process_basic_circuits_job( } } -#[tracing::instrument(skip_all, fields(l1_batch = 
%block_number))] -async fn update_database( - prover_connection_pool: &ConnectionPool, - started_at: Instant, - block_number: L1BatchNumber, - blob_urls: BlobUrls, -) { - let mut connection = prover_connection_pool - .connection() - .await - .expect("failed to get database connection"); - let mut transaction = connection - .start_transaction() - .await - .expect("failed to get database transaction"); - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(block_number) - .await; - transaction - .fri_prover_jobs_dal() - .insert_prover_jobs( - block_number, - blob_urls.circuit_ids_and_urls, - AggregationRound::BasicCircuits, - 0, - protocol_version_id, - ) - .await; - transaction - .fri_witness_generator_dal() - .create_aggregation_jobs( - block_number, - &blob_urls.closed_form_inputs_and_urls, - &blob_urls.scheduler_witness_url, - get_recursive_layer_circuit_id_for_base_layer, - protocol_version_id, - ) - .await; - transaction - .fri_witness_generator_dal() - .mark_witness_job_as_successful(block_number, started_at.elapsed()) - .await; - transaction - .commit() - .await - .expect("failed to commit database transaction"); -} - -#[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -async fn get_artifacts( - block_number: L1BatchNumber, - object_store: &dyn ObjectStore, -) -> BasicWitnessGeneratorJob { - let job = object_store.get(block_number).await.unwrap(); - BasicWitnessGeneratorJob { block_number, job } -} - -#[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -async fn save_scheduler_artifacts( - block_number: L1BatchNumber, - scheduler_partial_input: SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - aux_output_witness: BlockAuxilaryOutputWitness, - object_store: &dyn ObjectStore, - public_object_store: Option<&dyn ObjectStore>, - shall_save_to_public_bucket: bool, -) -> String { - let aux_output_witness_wrapper = AuxOutputWitnessWrapper(aux_output_witness); - if shall_save_to_public_bucket { - public_object_store - .expect("public_object_store shall not be empty while running with shall_save_to_public_bucket config") - .put(block_number, &aux_output_witness_wrapper) - .await - .unwrap(); - } - object_store - .put(block_number, &aux_output_witness_wrapper) - .await - .unwrap(); - let wrapper = SchedulerPartialInputWrapper(scheduler_partial_input); - object_store.put(block_number, &wrapper).await.unwrap() -} - #[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))] async fn save_recursion_queue( block_number: L1BatchNumber, @@ -396,17 +187,6 @@ async fn save_recursion_queue( (circuit_id, blob_url, basic_circuit_count) } -type Witness = ( - Vec<(u8, String)>, - Vec<(u8, String, usize)>, - SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - BlockAuxilaryOutputWitness, -); - #[tracing::instrument(skip_all, fields(l1_batch = %block_number))] async fn generate_witness( block_number: L1BatchNumber, diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs new file mode 100644 index 000000000000..a94587d00ec6 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs @@ -0,0 +1,150 @@ +use std::time::Instant; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, 
ProverDal}; +use zksync_prover_fri_types::keys::ClosedFormInputKey; +use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_types::{basic_fri_types::AggregationRound, prover_dal::LeafAggregationJobMetadata}; + +use crate::{ + artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, + leaf_aggregation::{LeafAggregationArtifacts, LeafAggregationWitnessGenerator}, + metrics::WITNESS_GENERATOR_METRICS, + utils::{save_node_aggregations_artifacts, ClosedFormInputWrapper}, +}; + +#[async_trait] +impl ArtifactsManager for LeafAggregationWitnessGenerator { + type InputMetadata = LeafAggregationJobMetadata; + type InputArtifacts = ClosedFormInputWrapper; + type OutputArtifacts = LeafAggregationArtifacts; + + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result { + let key = ClosedFormInputKey { + block_number: metadata.block_number, + circuit_id: metadata.circuit_id, + }; + + let artifacts = object_store + .get(key) + .await + .unwrap_or_else(|_| panic!("leaf aggregation job artifacts missing: {:?}", key)); + + Ok(artifacts) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) + )] + async fn save_artifacts( + _job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let started_at = Instant::now(); + let aggregations_urls = save_node_aggregations_artifacts( + artifacts.block_number, + get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), + 0, + artifacts.aggregations, + object_store, + ) + .await; + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + BlobUrls::Aggregation(AggregationBlobUrls { + aggregations_urls, + circuit_ids_and_urls: artifacts.circuit_ids_and_urls, + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job_id) + )] + async fn update_database( + connection_pool: &ConnectionPool, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + tracing::info!( + "Updating database for job_id {}, block {} with circuit id {}", + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + + let blob_urls = match blob_urls { + BlobUrls::Aggregation(blob_urls) => blob_urls, + _ => panic!("Unexpected blob urls type"), + }; + + let mut prover_connection = connection_pool.connection().await.unwrap(); + let mut transaction = prover_connection.start_transaction().await.unwrap(); + let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len(); + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(artifacts.block_number) + .await; + tracing::info!( + "Inserting {} prover jobs for job_id {}, block {} with circuit id {}", + blob_urls.circuit_ids_and_urls.len(), + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + transaction + .fri_prover_jobs_dal() + .insert_prover_jobs( + artifacts.block_number, + blob_urls.circuit_ids_and_urls, + AggregationRound::LeafAggregation, + 0, + protocol_version_id, + ) + .await; + tracing::info!( + "Updating node aggregation jobs url for job_id {}, block {} with circuit id {}", + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + transaction + .fri_witness_generator_dal() + .update_node_aggregation_jobs_url( + artifacts.block_number, + 
get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), + number_of_dependent_jobs, + 0, + blob_urls.aggregations_urls, + ) + .await; + tracing::info!( + "Marking leaf aggregation job as successful for job id {}, block {} with circuit id {}", + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + transaction + .fri_witness_generator_dal() + .mark_leaf_aggregation_as_successful(job_id, started_at.elapsed()) + .await; + + tracing::info!( + "Committing transaction for job_id {}, block {} with circuit id {}", + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + transaction.commit().await?; + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs new file mode 100644 index 000000000000..e032084151eb --- /dev/null +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs @@ -0,0 +1,124 @@ +use std::time::Instant; + +use anyhow::Context as _; +use async_trait::async_trait; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::basic_fri_types::AggregationRound; + +use crate::{ + artifacts::ArtifactsManager, + leaf_aggregation::{ + prepare_leaf_aggregation_job, LeafAggregationArtifacts, LeafAggregationWitnessGenerator, + LeafAggregationWitnessGeneratorJob, + }, + metrics::WITNESS_GENERATOR_METRICS, +}; + +#[async_trait] +impl JobProcessor for LeafAggregationWitnessGenerator { + type Job = LeafAggregationWitnessGeneratorJob; + type JobId = u32; + type JobArtifacts = LeafAggregationArtifacts; + + const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result> { + let mut prover_connection = self.prover_connection_pool.connection().await?; + let pod_name = get_current_pod_name(); + let Some(metadata) = prover_connection + .fri_witness_generator_dal() + .get_next_leaf_aggregation_job(self.protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + tracing::info!("Processing leaf aggregation job {:?}", metadata.id); + Ok(Some(( + metadata.id, + prepare_leaf_aggregation_job(metadata, &*self.object_store, self.keystore.clone()) + .await + .context("prepare_leaf_aggregation_job()")?, + ))) + } + + async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_leaf_aggregation_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: LeafAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle> { + let object_store = self.object_store.clone(); + let max_circuits_in_flight = self.config.max_circuits_in_flight; + tokio::spawn(async move { + Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) + }) + } + + async fn save_result( + &self, + job_id: u32, + started_at: Instant, + artifacts: LeafAggregationArtifacts, + ) -> anyhow::Result<()> { + let block_number = artifacts.block_number; + let circuit_id = artifacts.circuit_id; + tracing::info!( + "Saving leaf aggregation artifacts for block {} with circuit {}", + block_number.0, + circuit_id, + ); + + let blob_save_started_at = Instant::now(); + + let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), 
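
The `update_database` implementations above do all bookkeeping — inserting prover jobs, updating aggregation URLs, marking the witness job successful — inside one database transaction, so partial progress is never visible to other components. A self-contained, in-memory analogue of that stage-then-commit shape; the tables and method names are stand-ins for the prover DAL:

#[derive(Default, Debug)]
struct JobTables {
    prover_jobs: Vec<(u8, String)>, // (circuit_id, blob_url)
    successful: Vec<u32>,           // finished witness-generator job ids
}

// Stage every mutation, then apply them all at once on commit, so a
// failure midway leaves the tables untouched.
struct Transaction<'a> {
    target: &'a mut JobTables,
    staged: JobTables,
}

impl<'a> Transaction<'a> {
    fn new(target: &'a mut JobTables) -> Self {
        Self { target, staged: JobTables::default() }
    }

    fn insert_prover_jobs(&mut self, jobs: Vec<(u8, String)>) {
        self.staged.prover_jobs.extend(jobs);
    }

    fn mark_job_as_successful(&mut self, job_id: u32) {
        self.staged.successful.push(job_id);
    }

    fn commit(self) {
        self.target.prover_jobs.extend(self.staged.prover_jobs);
        self.target.successful.extend(self.staged.successful);
    }
}

fn main() {
    let mut tables = JobTables::default();
    let mut tx = Transaction::new(&mut tables);
    tx.insert_prover_jobs(vec![(1, "blob://leaf/1".into())]);
    tx.mark_job_as_successful(42);
    tx.commit(); // dropping tx without commit would discard staged changes
    println!("{tables:?}");
}
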
&*self.object_store).await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] + .observe(blob_save_started_at.elapsed()); + + tracing::info!( + "Saved leaf aggregation artifacts for block {} with circuit {}", + block_number.0, + circuit_id, + ); + Self::update_database( + &self.prover_connection_pool, + job_id, + started_at, + blob_urls, + artifacts, + ) + .await?; + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for LeafAggregationWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_leaf_aggregation_job_attempts(*job_id) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for LeafAggregationWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs similarity index 50% rename from prover/crates/bin/witness_generator/src/leaf_aggregation.rs rename to prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs index 2f4494187975..d669a4cc97e3 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs @@ -1,7 +1,6 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; -use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; use tokio::sync::Semaphore; use zkevm_test_harness::{ @@ -12,7 +11,7 @@ use zkevm_test_harness::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, @@ -22,40 +21,25 @@ use zksync_prover_fri_types::{ encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, }, - get_current_pod_name, - keys::ClosedFormInputKey, FriProofWrapper, }; -use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; -use zksync_queued_job_processor::JobProcessor; +use zksync_prover_keystore::keystore::Keystore; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, prover_dal::LeafAggregationJobMetadata, L1BatchNumber, }; -use zksync_vk_setup_data_server_fri::keystore::Keystore; use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::{ - load_proofs_for_job_ids, save_node_aggregations_artifacts, - save_recursive_layer_prover_input_artifacts, ClosedFormInputWrapper, + load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts, + ClosedFormInputWrapper, }, }; -pub struct LeafAggregationArtifacts { - circuit_id: u8, - block_number: L1BatchNumber, - pub aggregations: Vec<(u64, RecursionQueueSimulator)>, - pub circuit_ids_and_urls: Vec<(u8, String)>, - #[allow(dead_code)] - closed_form_inputs: Vec>, -} - -#[derive(Debug)] -struct BlobUrls { - circuit_ids_and_urls: Vec<(u8, String)>, - aggregations_urls: String, -} +mod artifacts; +mod job_processor; pub struct LeafAggregationWitnessGeneratorJob { pub(crate) circuit_id: u8, @@ -72,7 +56,17 @@ pub struct 
LeafAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, +} + +#[derive(Clone)] +pub struct LeafAggregationArtifacts { + circuit_id: u8, + block_number: L1BatchNumber, + pub aggregations: Vec<(u64, RecursionQueueSimulator)>, + pub circuit_ids_and_urls: Vec<(u8, String)>, + #[allow(dead_code)] + closed_form_inputs: Vec>, } impl LeafAggregationWitnessGenerator { @@ -81,14 +75,14 @@ impl LeafAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, - setup_data_path, + keystore, } } @@ -113,112 +107,6 @@ impl LeafAggregationWitnessGenerator { } } -#[async_trait] -impl JobProcessor for LeafAggregationWitnessGenerator { - type Job = LeafAggregationWitnessGeneratorJob; - type JobId = u32; - type JobArtifacts = LeafAggregationArtifacts; - - const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(metadata) = prover_connection - .fri_witness_generator_dal() - .get_next_leaf_aggregation_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - tracing::info!("Processing leaf aggregation job {:?}", metadata.id); - Ok(Some(( - metadata.id, - prepare_leaf_aggregation_job( - metadata, - &*self.object_store, - self.setup_data_path.clone(), - ) - .await - .context("prepare_leaf_aggregation_job()")?, - ))) - } - - async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_leaf_aggregation_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: LeafAggregationWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - let object_store = self.object_store.clone(); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) - }) - } - - async fn save_result( - &self, - job_id: u32, - started_at: Instant, - artifacts: LeafAggregationArtifacts, - ) -> anyhow::Result<()> { - let block_number = artifacts.block_number; - let circuit_id = artifacts.circuit_id; - tracing::info!( - "Saving leaf aggregation artifacts for block {} with circuit {}", - block_number.0, - circuit_id, - ); - let blob_urls = save_artifacts(artifacts, &*self.object_store).await; - tracing::info!( - "Saved leaf aggregation artifacts for block {} with circuit {} (count: {})", - block_number.0, - circuit_id, - blob_urls.circuit_ids_and_urls.len(), - ); - update_database( - &self.prover_connection_pool, - started_at, - block_number, - job_id, - blob_urls, - circuit_id, - ) - .await; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for LeafAggregationWitnessGenerator")?; - prover_storage - 
.fri_witness_generator_dal() - .get_leaf_aggregation_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for LeafAggregationWitnessGenerator") - } -} - #[tracing::instrument( skip_all, fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) @@ -226,16 +114,16 @@ impl JobProcessor for LeafAggregationWitnessGenerator { pub async fn prepare_leaf_aggregation_job( metadata: LeafAggregationJobMetadata, object_store: &dyn ObjectStore, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); - let closed_form_input = get_artifacts(&metadata, object_store).await; + let closed_form_input = + LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let base_vk = keystore .load_base_layer_verification_key(metadata.circuit_id) .context("get_base_layer_vk_for_circuit_type()")?; @@ -373,125 +261,3 @@ pub async fn process_leaf_aggregation_job( closed_form_inputs: job.closed_form_inputs.0, } } - -#[tracing::instrument( - skip_all, - fields(l1_batch = %block_number, circuit_id = %circuit_id) -)] -async fn update_database( - prover_connection_pool: &ConnectionPool, - started_at: Instant, - block_number: L1BatchNumber, - job_id: u32, - blob_urls: BlobUrls, - circuit_id: u8, -) { - tracing::info!( - "Updating database for job_id {}, block {} with circuit id {}", - job_id, - block_number.0, - circuit_id, - ); - let mut prover_connection = prover_connection_pool.connection().await.unwrap(); - let mut transaction = prover_connection.start_transaction().await.unwrap(); - let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len(); - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(block_number) - .await; - tracing::info!( - "Inserting {} prover jobs for job_id {}, block {} with circuit id {}", - blob_urls.circuit_ids_and_urls.len(), - job_id, - block_number.0, - circuit_id, - ); - transaction - .fri_prover_jobs_dal() - .insert_prover_jobs( - block_number, - blob_urls.circuit_ids_and_urls, - AggregationRound::LeafAggregation, - 0, - protocol_version_id, - ) - .await; - tracing::info!( - "Updating node aggregation jobs url for job_id {}, block {} with circuit id {}", - job_id, - block_number.0, - circuit_id, - ); - transaction - .fri_witness_generator_dal() - .update_node_aggregation_jobs_url( - block_number, - get_recursive_layer_circuit_id_for_base_layer(circuit_id), - number_of_dependent_jobs, - 0, - blob_urls.aggregations_urls, - ) - .await; - tracing::info!( - "Marking leaf aggregation job as successful for job id {}, block {} with circuit id {}", - job_id, - block_number.0, - circuit_id, - ); - transaction - .fri_witness_generator_dal() - .mark_leaf_aggregation_as_successful(job_id, started_at.elapsed()) - .await; - - tracing::info!( - "Committing transaction for job_id {}, block {} with circuit id {}", - job_id, - block_number.0, - circuit_id, - ); - transaction.commit().await.unwrap(); -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) -)] -async fn get_artifacts( - metadata: &LeafAggregationJobMetadata, - object_store: &dyn ObjectStore, -) -> ClosedFormInputWrapper { - let key = ClosedFormInputKey { - block_number: 
metadata.block_number, - circuit_id: metadata.circuit_id, - }; - object_store - .get(key) - .await - .unwrap_or_else(|_| panic!("leaf aggregation job artifacts missing: {:?}", key)) -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) -)] -async fn save_artifacts( - artifacts: LeafAggregationArtifacts, - object_store: &dyn ObjectStore, -) -> BlobUrls { - let started_at = Instant::now(); - let aggregations_urls = save_node_aggregations_artifacts( - artifacts.block_number, - get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), - 0, - artifacts.aggregations, - object_store, - ) - .await; - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - BlobUrls { - circuit_ids_and_urls: artifacts.circuit_ids_and_urls, - aggregations_urls, - } -} diff --git a/prover/crates/bin/witness_generator/src/lib.rs b/prover/crates/bin/witness_generator/src/lib.rs index 00d2ebf2bb3d..c0ac9718c6ee 100644 --- a/prover/crates/bin/witness_generator/src/lib.rs +++ b/prover/crates/bin/witness_generator/src/lib.rs @@ -1,6 +1,7 @@ #![allow(incomplete_features)] // We have to use generic const exprs. #![feature(generic_const_exprs)] +pub mod artifacts; pub mod basic_circuits; pub mod leaf_aggregation; pub mod metrics; diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index 50c955168602..9d75d8ddc6f1 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -14,10 +14,10 @@ use zksync_env_config::object_store::ProverObjectStoreConfig; use zksync_object_store::ObjectStoreFactory; use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; -use zksync_types::basic_fri_types::AggregationRound; +use zksync_types::{basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion}; use zksync_utils::wait_for_tasks::ManagedTasks; -use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments; use zksync_vlog::prometheus::PrometheusExporterConfig; use zksync_witness_generator::{ basic_circuits::BasicWitnessGenerator, leaf_aggregation::LeafAggregationWitnessGenerator, @@ -54,6 +54,41 @@ struct Opt { secrets_path: Option, } +/// Checks if the configuration locally matches the one in the database. +/// This function recalculates the commitment in order to check the exact code that +/// will run, instead of loading `commitments.json` (which also may correct misaligned +/// information). 
+async fn ensure_protocol_alignment( + prover_pool: &ConnectionPool, + protocol_version: ProtocolSemanticVersion, + keystore: &Keystore, +) -> anyhow::Result<()> { + tracing::info!("Verifying protocol alignment for {:?}", protocol_version); + let vk_commitments_in_db = match prover_pool + .connection() + .await + .unwrap() + .fri_protocol_versions_dal() + .vk_commitments_for(protocol_version) + .await + { + Some(commitments) => commitments, + None => { + panic!( + "No vk commitments available in database for a protocol version {:?}.", + protocol_version + ); + } + }; + let scheduler_vk_hash = vk_commitments_in_db.snark_wrapper_vk_hash; + keystore + .verify_scheduler_vk_hash(scheduler_vk_hash) + .with_context(|| + format!("VK commitments didn't match commitments from DB for protocol version {protocol_version:?}") + )?; + Ok(()) +} + #[tokio::main] async fn main() -> anyhow::Result<()> { let opt = Opt::from_args(); @@ -82,6 +117,8 @@ async fn main() -> anyhow::Result<()> { .witness_generator_config .context("witness generator config")? .clone(); + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); let prometheus_config = general_config.prometheus_config.clone(); @@ -103,22 +140,9 @@ async fn main() -> anyhow::Result<()> { let (stop_sender, stop_receiver) = watch::channel(false); let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; - let vk_commitments_in_db = match prover_connection_pool - .connection() - .await - .unwrap() - .fri_protocol_versions_dal() - .vk_commitments_for(protocol_version) + ensure_protocol_alignment(&prover_connection_pool, protocol_version, &keystore) .await - { - Some(commitments) => commitments, - None => { - panic!( - "No vk commitments available in database for a protocol version {:?}.", - protocol_version - ); - } - }; + .unwrap_or_else(|err| panic!("Protocol alignment check failed: {:?}", err)); let rounds = match (opt.round, opt.all_rounds) { (Some(round), false) => vec![round], @@ -159,8 +183,6 @@ async fn main() -> anyhow::Result<()> { let mut tasks = Vec::new(); tasks.push(tokio::spawn(prometheus_task)); - let setup_data_path = prover_config.setup_data_path.clone(); - for round in rounds { tracing::info!( "initializing the {:?} witness generator, batch size: {:?} with protocol_version: {:?}", @@ -171,13 +193,6 @@ async fn main() -> anyhow::Result<()> { let witness_generator_task = match round { AggregationRound::BasicCircuits => { - let vk_commitments = get_cached_commitments(Some(setup_data_path.clone())); - assert_eq!( - vk_commitments, - vk_commitments_in_db, - "VK commitments didn't match commitments from DB for protocol version {protocol_version:?}. 
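
`ensure_protocol_alignment` above recomputes the commitment locally and compares it against the `snark_wrapper_vk_hash` stored for the protocol version, rather than trusting a possibly regenerated `commitments.json`. A condensed, self-contained sketch of that comparison; the hash type and the "recomputed" values are stand-ins:

use anyhow::{bail, Context as _, Result};

// Stand-in for H256 / the recomputed SNARK-wrapper VK hash.
type VkHash = [u8; 32];

// Mirrors the shape of keystore.verify_scheduler_vk_hash: compare the
// locally recomputed hash with the one recorded in the database and
// fail loudly on mismatch.
fn verify_scheduler_vk_hash(local: VkHash, in_db: VkHash) -> Result<()> {
    if local != in_db {
        bail!("SNARK wrapper VK hash mismatch: local {local:02x?}, database {in_db:02x?}");
    }
    Ok(())
}

fn main() -> Result<()> {
    let local: VkHash = [0xab; 32]; // pretend-recomputed commitment
    let in_db: VkHash = [0xab; 32]; // pretend value from fri_protocol_versions
    verify_scheduler_vk_hash(local, in_db)
        .context("VK commitments didn't match commitments from DB")?;
    println!("protocol aligned");
    Ok(())
}
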
Cached commitments: {vk_commitments:?}, commitments in database: {vk_commitments_in_db:?}" - ); - let public_blob_store = match config.shall_save_to_public_bucket { false => None, true => Some( @@ -206,7 +221,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - setup_data_path.clone(), + keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -216,7 +231,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - setup_data_path.clone(), + keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -226,7 +241,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - setup_data_path.clone(), + keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -236,7 +251,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - setup_data_path.clone(), + keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs new file mode 100644 index 000000000000..245027f0d677 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs @@ -0,0 +1,146 @@ +use std::time::Instant; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::keys::AggregationsKey; +use zksync_types::{basic_fri_types::AggregationRound, prover_dal::NodeAggregationJobMetadata}; + +use crate::{ + artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, + metrics::WITNESS_GENERATOR_METRICS, + node_aggregation::{NodeAggregationArtifacts, NodeAggregationWitnessGenerator}, + utils::{save_node_aggregations_artifacts, AggregationWrapper}, +}; + +#[async_trait] +impl ArtifactsManager for NodeAggregationWitnessGenerator { + type InputMetadata = NodeAggregationJobMetadata; + type InputArtifacts = AggregationWrapper; + type OutputArtifacts = NodeAggregationArtifacts; + + #[tracing::instrument( + skip_all, + fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) + )] + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result { + let key = AggregationsKey { + block_number: metadata.block_number, + circuit_id: metadata.circuit_id, + depth: metadata.depth, + }; + let artifacts = object_store.get(key).await.unwrap_or_else(|error| { + panic!( + "node aggregation job artifacts getting error. 
Key: {:?}, error: {:?}", + key, error + ) + }); + + Ok(artifacts) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) + )] + async fn save_artifacts( + _job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let started_at = Instant::now(); + let aggregations_urls = save_node_aggregations_artifacts( + artifacts.block_number, + artifacts.circuit_id, + artifacts.depth, + artifacts.next_aggregations, + object_store, + ) + .await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); + + BlobUrls::Aggregation(AggregationBlobUrls { + aggregations_urls, + circuit_ids_and_urls: artifacts.recursive_circuit_ids_and_urls, + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = % job_id) + )] + async fn update_database( + connection_pool: &ConnectionPool, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + let mut prover_connection = connection_pool.connection().await.unwrap(); + let blob_urls = match blob_urls { + BlobUrls::Aggregation(blobs) => blobs, + _ => unreachable!(), + }; + let mut transaction = prover_connection.start_transaction().await.unwrap(); + let dependent_jobs = blob_urls.circuit_ids_and_urls.len(); + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(artifacts.block_number) + .await; + match artifacts.next_aggregations.len() > 1 { + true => { + transaction + .fri_prover_jobs_dal() + .insert_prover_jobs( + artifacts.block_number, + blob_urls.circuit_ids_and_urls, + AggregationRound::NodeAggregation, + artifacts.depth, + protocol_version_id, + ) + .await; + transaction + .fri_witness_generator_dal() + .insert_node_aggregation_jobs( + artifacts.block_number, + artifacts.circuit_id, + Some(dependent_jobs as i32), + artifacts.depth, + &blob_urls.aggregations_urls, + protocol_version_id, + ) + .await; + } + false => { + let (_, blob_url) = blob_urls.circuit_ids_and_urls[0].clone(); + transaction + .fri_prover_jobs_dal() + .insert_prover_job( + artifacts.block_number, + artifacts.circuit_id, + artifacts.depth, + 0, + AggregationRound::NodeAggregation, + &blob_url, + true, + protocol_version_id, + ) + .await + } + } + + transaction + .fri_witness_generator_dal() + .mark_node_aggregation_as_successful(job_id, started_at.elapsed()) + .await; + + transaction.commit().await?; + + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs new file mode 100644 index 000000000000..a015462cd6fe --- /dev/null +++ b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs @@ -0,0 +1,115 @@ +use std::time::Instant; + +use anyhow::Context as _; +use async_trait::async_trait; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::basic_fri_types::AggregationRound; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + node_aggregation::{ + prepare_job, NodeAggregationArtifacts, NodeAggregationWitnessGenerator, + NodeAggregationWitnessGeneratorJob, + }, +}; + +#[async_trait] +impl JobProcessor for NodeAggregationWitnessGenerator { + type Job = NodeAggregationWitnessGeneratorJob; + type JobId = u32; + type JobArtifacts = 
NodeAggregationArtifacts; + + const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result> { + let mut prover_connection = self.prover_connection_pool.connection().await?; + let pod_name = get_current_pod_name(); + let Some(metadata) = prover_connection + .fri_witness_generator_dal() + .get_next_node_aggregation_job(self.protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + tracing::info!("Processing node aggregation job {:?}", metadata.id); + Ok(Some(( + metadata.id, + prepare_job(metadata, &*self.object_store, self.keystore.clone()) + .await + .context("prepare_job()")?, + ))) + } + + async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_node_aggregation_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: NodeAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle> { + let object_store = self.object_store.clone(); + let max_circuits_in_flight = self.config.max_circuits_in_flight; + tokio::spawn(async move { + Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = % artifacts.block_number, circuit_id = % artifacts.circuit_id) + )] + async fn save_result( + &self, + job_id: u32, + started_at: Instant, + artifacts: NodeAggregationArtifacts, + ) -> anyhow::Result<()> { + let blob_save_started_at = Instant::now(); + + let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] + .observe(blob_save_started_at.elapsed()); + + Self::update_database( + &self.prover_connection_pool, + job_id, + started_at, + blob_urls, + artifacts, + ) + .await?; + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for NodeAggregationWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_node_aggregation_job_attempts(*job_id) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for NodeAggregationWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/node_aggregation.rs b/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs similarity index 50% rename from prover/crates/bin/witness_generator/src/node_aggregation.rs rename to prover/crates/bin/witness_generator/src/node_aggregation/mod.rs index b6fc6b8f7c65..047caa363a89 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs @@ -1,7 +1,6 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; -use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::RECURSION_ARITY; use tokio::sync::Semaphore; use zkevm_test_harness::witness::recursive_aggregation::{ @@ -9,7 +8,7 @@ use zkevm_test_harness::witness::recursive_aggregation::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, 
Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, @@ -19,25 +18,24 @@ use zksync_prover_fri_types::{ encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, }, - get_current_pod_name, - keys::AggregationsKey, FriProofWrapper, }; -use zksync_queued_job_processor::JobProcessor; +use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, prover_dal::NodeAggregationJobMetadata, L1BatchNumber, }; -use zksync_vk_setup_data_server_fri::{keystore::Keystore, utils::get_leaf_vk_params}; use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, - utils::{ - load_proofs_for_job_ids, save_node_aggregations_artifacts, - save_recursive_layer_prover_input_artifacts, AggregationWrapper, - }, + utils::{load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts}, }; +mod artifacts; +mod job_processor; + +#[derive(Clone)] pub struct NodeAggregationArtifacts { circuit_id: u8, block_number: L1BatchNumber, @@ -46,12 +44,6 @@ pub struct NodeAggregationArtifacts { pub recursive_circuit_ids_and_urls: Vec<(u8, String)>, } -#[derive(Debug)] -struct BlobUrls { - node_aggregations_url: String, - circuit_ids_and_urls: Vec<(u8, String)>, -} - #[derive(Clone)] pub struct NodeAggregationWitnessGeneratorJob { circuit_id: u8, @@ -70,7 +62,7 @@ pub struct NodeAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, } impl NodeAggregationWitnessGenerator { @@ -79,20 +71,20 @@ impl NodeAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, - setup_data_path, + keystore, } } #[tracing::instrument( skip_all, - fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id) + fields(l1_batch = % job.block_number, circuit_id = % job.circuit_id) )] pub async fn process_job_impl( job: NodeAggregationWitnessGeneratorJob, @@ -223,122 +215,22 @@ impl NodeAggregationWitnessGenerator { } } -#[async_trait] -impl JobProcessor for NodeAggregationWitnessGenerator { - type Job = NodeAggregationWitnessGeneratorJob; - type JobId = u32; - type JobArtifacts = NodeAggregationArtifacts; - - const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(metadata) = prover_connection - .fri_witness_generator_dal() - .get_next_node_aggregation_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - tracing::info!("Processing node aggregation job {:?}", metadata.id); - Ok(Some(( - metadata.id, - prepare_job(metadata, &*self.object_store, self.setup_data_path.clone()) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_node_aggregation_job_failed(&error, job_id) - .await; - } - 
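The per-round `artifacts.rs` modules above all implement `ArtifactsManager`, a trait that lives in `witness_generator/src/artifacts.rs` and is imported but never shown in this diff. The following is a sketch reconstructed from the four impls; the elided generic parameters (`ConnectionPool<Prover>`, `anyhow::Result<Self::InputArtifacts>`) and the doc comments are inferences from the surrounding code, not a verbatim excerpt:

```rust
use std::time::Instant;

use async_trait::async_trait;
use zksync_object_store::ObjectStore;
use zksync_prover_dal::{ConnectionPool, Prover};

/// URLs of artifacts persisted to the object store. The two variants match
/// the `BlobUrls::Url(...)` / `BlobUrls::Aggregation(...)` arms used above.
pub enum BlobUrls {
    Url(String),
    Aggregation(AggregationBlobUrls),
}

pub struct AggregationBlobUrls {
    pub aggregations_urls: String,
    pub circuit_ids_and_urls: Vec<(u8, String)>,
}

#[async_trait]
pub trait ArtifactsManager {
    type InputMetadata;
    type InputArtifacts;
    type OutputArtifacts;

    /// Fetches the job's inputs (proofs, aggregations, ...) from the object store.
    async fn get_artifacts(
        metadata: &Self::InputMetadata,
        object_store: &dyn ObjectStore,
    ) -> anyhow::Result<Self::InputArtifacts>;

    /// Persists the job's outputs and returns the blob URLs to record.
    async fn save_artifacts(
        job_id: u32,
        artifacts: Self::OutputArtifacts,
        object_store: &dyn ObjectStore,
    ) -> BlobUrls;

    /// In one transaction: inserts follow-up prover jobs and marks this job done.
    async fn update_database(
        connection_pool: &ConnectionPool<Prover>,
        job_id: u32,
        started_at: Instant,
        blob_urls: BlobUrls,
        artifacts: Self::OutputArtifacts,
    ) -> anyhow::Result<()>;
}
```

With storage and database bookkeeping behind this one interface per round, each round's `JobProcessor` impl, now moved into the new `job_processor.rs` files, shrinks to queue handling and metrics.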
- #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: NodeAggregationWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - let object_store = self.object_store.clone(); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) - }) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) - )] - async fn save_result( - &self, - job_id: u32, - started_at: Instant, - artifacts: NodeAggregationArtifacts, - ) -> anyhow::Result<()> { - let block_number = artifacts.block_number; - let circuit_id = artifacts.circuit_id; - let depth = artifacts.depth; - let shall_continue_node_aggregations = artifacts.next_aggregations.len() > 1; - let blob_urls = save_artifacts(artifacts, &*self.object_store).await; - update_database( - &self.prover_connection_pool, - started_at, - job_id, - block_number, - depth, - circuit_id, - blob_urls, - shall_continue_node_aggregations, - ) - .await; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for NodeAggregationWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_node_aggregation_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for NodeAggregationWitnessGenerator") - } -} - #[tracing::instrument( skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) + fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) )] pub async fn prepare_job( metadata: NodeAggregationJobMetadata, object_store: &dyn ObjectStore, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); - let artifacts = get_artifacts(&metadata, object_store).await; + let artifacts = NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()] .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let leaf_vk = keystore .load_recursive_layer_verification_key(metadata.circuit_id) .context("get_recursive_layer_vk_for_circuit_type")?; @@ -362,123 +254,3 @@ pub async fn prepare_job( all_leafs_layer_params: get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?, }) } - -#[allow(clippy::too_many_arguments)] -#[tracing::instrument( - skip_all, - fields(l1_batch = %block_number, circuit_id = %circuit_id) -)] -async fn update_database( - prover_connection_pool: &ConnectionPool, - started_at: Instant, - id: u32, - block_number: L1BatchNumber, - depth: u16, - circuit_id: u8, - blob_urls: BlobUrls, - shall_continue_node_aggregations: bool, -) { - let mut prover_connection = prover_connection_pool.connection().await.unwrap(); - let mut transaction = prover_connection.start_transaction().await.unwrap(); - let dependent_jobs = blob_urls.circuit_ids_and_urls.len(); - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(block_number) - .await; - match shall_continue_node_aggregations { - true => { - transaction - 
.fri_prover_jobs_dal() - .insert_prover_jobs( - block_number, - blob_urls.circuit_ids_and_urls, - AggregationRound::NodeAggregation, - depth, - protocol_version_id, - ) - .await; - transaction - .fri_witness_generator_dal() - .insert_node_aggregation_jobs( - block_number, - circuit_id, - Some(dependent_jobs as i32), - depth, - &blob_urls.node_aggregations_url, - protocol_version_id, - ) - .await; - } - false => { - let (_, blob_url) = blob_urls.circuit_ids_and_urls[0].clone(); - transaction - .fri_prover_jobs_dal() - .insert_prover_job( - block_number, - circuit_id, - depth, - 0, - AggregationRound::NodeAggregation, - &blob_url, - true, - protocol_version_id, - ) - .await - } - } - - transaction - .fri_witness_generator_dal() - .mark_node_aggregation_as_successful(id, started_at.elapsed()) - .await; - - transaction.commit().await.unwrap(); -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) -)] -async fn get_artifacts( - metadata: &NodeAggregationJobMetadata, - object_store: &dyn ObjectStore, -) -> AggregationWrapper { - let key = AggregationsKey { - block_number: metadata.block_number, - circuit_id: metadata.circuit_id, - depth: metadata.depth, - }; - object_store.get(key).await.unwrap_or_else(|error| { - panic!( - "node aggregation job artifacts getting error. Key: {:?}, error: {:?}", - key, error - ) - }) -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) -)] -async fn save_artifacts( - artifacts: NodeAggregationArtifacts, - object_store: &dyn ObjectStore, -) -> BlobUrls { - let started_at = Instant::now(); - let aggregations_urls = save_node_aggregations_artifacts( - artifacts.block_number, - artifacts.circuit_id, - artifacts.depth, - artifacts.next_aggregations, - object_store, - ) - .await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] - .observe(started_at.elapsed()); - - BlobUrls { - node_aggregations_url: aggregations_urls, - circuit_ids_and_urls: artifacts.recursive_circuit_ids_and_urls, - } -} diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs b/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs new file mode 100644 index 000000000000..8379fcf9f933 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs @@ -0,0 +1,141 @@ +use std::{collections::HashMap, time::Instant}; + +use async_trait::async_trait; +use circuit_definitions::{ + circuit_definitions::recursion_layer::{ZkSyncRecursionLayerStorageType, ZkSyncRecursionProof}, + zkevm_circuits::scheduler::aux::BaseLayerCircuitType, +}; +use zkevm_test_harness::empty_node_proof; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapper}; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::{ArtifactsManager, BlobUrls}, + recursion_tip::{RecursionTipArtifacts, RecursionTipWitnessGenerator}, +}; + +#[async_trait] +impl ArtifactsManager for RecursionTipWitnessGenerator { + type InputMetadata = Vec<(u8, u32)>; + type InputArtifacts = Vec; + type OutputArtifacts = RecursionTipArtifacts; + + /// Loads all proofs for a given recursion tip's job ids. + /// Note that recursion tip may not have proofs for some specific circuits (because the batch didn't contain them). 
+ /// In this scenario, we still need to pass a proof, but it won't be taken into account during proving. + /// For this scenario, we use an empty_proof, but any proof would suffice. + async fn get_artifacts( + metadata: &Vec<(u8, u32)>, + object_store: &dyn ObjectStore, + ) -> anyhow::Result> { + let job_mapping: HashMap = metadata + .clone() + .into_iter() + .map(|(leaf_circuit_id, job_id)| { + ( + ZkSyncRecursionLayerStorageType::from_leaf_u8_to_basic_u8(leaf_circuit_id), + job_id, + ) + }) + .collect(); + + let empty_proof = empty_node_proof().into_inner(); + + let mut proofs = Vec::new(); + for circuit_id in BaseLayerCircuitType::as_iter_u8() { + if job_mapping.contains_key(&circuit_id) { + let fri_proof_wrapper = object_store + .get(*job_mapping.get(&circuit_id).unwrap()) + .await + .unwrap_or_else(|_| { + panic!( + "Failed to load proof with circuit_id {} for recursion tip", + circuit_id + ) + }); + match fri_proof_wrapper { + FriProofWrapper::Base(_) => { + return Err(anyhow::anyhow!( + "Expected only recursive proofs for recursion tip, got Base for circuit {}", + circuit_id + )); + } + FriProofWrapper::Recursive(recursive_proof) => { + proofs.push(recursive_proof.into_inner()); + } + } + } else { + proofs.push(empty_proof.clone()); + } + } + Ok(proofs) + } + + async fn save_artifacts( + job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let key = FriCircuitKey { + block_number: L1BatchNumber(job_id), + circuit_id: 255, + sequence_number: 0, + depth: 0, + aggregation_round: AggregationRound::RecursionTip, + }; + + let blob_url = object_store + .put( + key, + &CircuitWrapper::Recursive(artifacts.recursion_tip_circuit.clone()), + ) + .await + .unwrap(); + + BlobUrls::Url(blob_url) + } + + async fn update_database( + connection_pool: &ConnectionPool, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + _artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + let blob_url = match blob_urls { + BlobUrls::Url(url) => url, + _ => panic!("Unexpected blob urls type"), + }; + + let mut prover_connection = connection_pool.connection().await?; + let mut transaction = prover_connection.start_transaction().await?; + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(L1BatchNumber(job_id)) + .await; + transaction + .fri_prover_jobs_dal() + .insert_prover_job( + L1BatchNumber(job_id), + ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, + 0, + 0, + AggregationRound::RecursionTip, + &blob_url, + false, + protocol_version_id, + ) + .await; + + transaction + .fri_witness_generator_dal() + .mark_recursion_tip_job_as_successful(L1BatchNumber(job_id), started_at.elapsed()) + .await; + + transaction.commit().await?; + + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs new file mode 100644 index 000000000000..f114724cfec4 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs @@ -0,0 +1,130 @@ +use std::time::Instant; + +use anyhow::Context as _; +use async_trait::async_trait; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + recursion_tip::{ + prepare_job, RecursionTipArtifacts, 
RecursionTipWitnessGenerator, + RecursionTipWitnessGeneratorJob, + }, +}; + +#[async_trait] +impl JobProcessor for RecursionTipWitnessGenerator { + type Job = RecursionTipWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = RecursionTipArtifacts; + + const SERVICE_NAME: &'static str = "recursion_tip_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result> { + let mut prover_connection = self.prover_connection_pool.connection().await?; + let pod_name = get_current_pod_name(); + let Some((l1_batch_number, number_of_final_node_jobs)) = prover_connection + .fri_witness_generator_dal() + .get_next_recursion_tip_witness_job(self.protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + + let final_node_proof_job_ids = prover_connection + .fri_prover_jobs_dal() + .get_final_node_proof_job_ids_for(l1_batch_number) + .await; + + assert_eq!( + final_node_proof_job_ids.len(), + number_of_final_node_jobs as usize, + "recursion tip witness job was scheduled without all final node jobs being completed; expected {}, got {}", + number_of_final_node_jobs, final_node_proof_job_ids.len() + ); + + Ok(Some(( + l1_batch_number, + prepare_job( + l1_batch_number, + final_node_proof_job_ids, + &*self.object_store, + self.keystore.clone(), + ) + .await + .context("prepare_job()")?, + ))) + } + + async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_recursion_tip_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: RecursionTipWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle> { + tokio::task::spawn_blocking(move || Ok(Self::process_job_sync(job, started_at))) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job_id) + )] + async fn save_result( + &self, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: RecursionTipArtifacts, + ) -> anyhow::Result<()> { + let blob_save_started_at = Instant::now(); + + let blob_urls = + Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::RecursionTip.into()] + .observe(blob_save_started_at.elapsed()); + + Self::update_database( + &self.prover_connection_pool, + job_id.0, + started_at, + blob_urls, + artifacts, + ) + .await?; + + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for RecursionTipWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_recursion_tip_witness_job_attempts(*job_id) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for RecursionTipWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/recursion_tip.rs b/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs similarity index 56% rename from prover/crates/bin/witness_generator/src/recursion_tip.rs rename to prover/crates/bin/witness_generator/src/recursion_tip/mod.rs index e05a0cc38cf8..4abb56a7d788 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs @@ -1,7 +1,6 @@ use 
std::{sync::Arc, time::Instant}; use anyhow::Context; -use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::recursion_layer::{ recursion_tip::RecursionTipCircuit, ZkSyncRecursionLayerStorageType, @@ -37,23 +36,20 @@ use zkevm_test_harness::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; -use zksync_prover_fri_types::{ - get_current_pod_name, - keys::{ClosedFormInputKey, FriCircuitKey}, - CircuitWrapper, -}; -use zksync_queued_job_processor::JobProcessor; +use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_fri_types::keys::ClosedFormInputKey; +use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; -use zksync_vk_setup_data_server_fri::{keystore::Keystore, utils::get_leaf_vk_params}; use crate::{ - metrics::WITNESS_GENERATOR_METRICS, - utils::{load_proofs_for_recursion_tip, ClosedFormInputWrapper}, + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::ClosedFormInputWrapper, }; +mod artifacts; +mod job_processor; + #[derive(Clone)] pub struct RecursionTipWitnessGeneratorJob { block_number: L1BatchNumber, @@ -65,6 +61,7 @@ pub struct RecursionTipWitnessGeneratorJob { node_vk: ZkSyncRecursionLayerVerificationKey, } +#[derive(Clone)] pub struct RecursionTipArtifacts { pub recursion_tip_circuit: ZkSyncRecursiveLayerCircuit, } @@ -75,7 +72,7 @@ pub struct RecursionTipWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, } impl RecursionTipWitnessGenerator { @@ -84,14 +81,14 @@ impl RecursionTipWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, - setup_data_path, + keystore, } } @@ -138,148 +135,6 @@ impl RecursionTipWitnessGenerator { } } -#[async_trait] -impl JobProcessor for RecursionTipWitnessGenerator { - type Job = RecursionTipWitnessGeneratorJob; - type JobId = L1BatchNumber; - type JobArtifacts = RecursionTipArtifacts; - - const SERVICE_NAME: &'static str = "recursion_tip_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some((l1_batch_number, number_of_final_node_jobs)) = prover_connection - .fri_witness_generator_dal() - .get_next_recursion_tip_witness_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - - let final_node_proof_job_ids = prover_connection - .fri_prover_jobs_dal() - .get_final_node_proof_job_ids_for(l1_batch_number) - .await; - - assert_eq!( - final_node_proof_job_ids.len(), - number_of_final_node_jobs as usize, - "recursion tip witness job was scheduled without all final node jobs being completed; expected {}, got {}", - number_of_final_node_jobs, final_node_proof_job_ids.len() - ); - - Ok(Some(( - l1_batch_number, - prepare_job( - l1_batch_number, - final_node_proof_job_ids, - &*self.object_store, - self.setup_data_path.clone(), - ) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () 
{ - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_recursion_tip_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: RecursionTipWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || Ok(Self::process_job_sync(job, started_at))) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job_id) - )] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - artifacts: RecursionTipArtifacts, - ) -> anyhow::Result<()> { - let key = FriCircuitKey { - block_number: job_id, - circuit_id: 255, - sequence_number: 0, - depth: 0, - aggregation_round: AggregationRound::RecursionTip, - }; - let blob_save_started_at = Instant::now(); - - let recursion_tip_circuit_blob_url = self - .object_store - .put( - key, - &CircuitWrapper::Recursive(artifacts.recursion_tip_circuit), - ) - .await?; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::RecursionTip.into()] - .observe(blob_save_started_at.elapsed()); - - let mut prover_connection = self.prover_connection_pool.connection().await?; - let mut transaction = prover_connection.start_transaction().await?; - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(job_id) - .await; - transaction - .fri_prover_jobs_dal() - .insert_prover_job( - job_id, - ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, - 0, - 0, - AggregationRound::RecursionTip, - &recursion_tip_circuit_blob_url, - false, - protocol_version_id, - ) - .await; - - transaction - .fri_witness_generator_dal() - .mark_recursion_tip_job_as_successful(job_id, started_at.elapsed()) - .await; - - transaction.commit().await?; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for RecursionTipWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_recursion_tip_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for RecursionTipWitnessGenerator") - } -} - #[tracing::instrument( skip_all, fields(l1_batch = %l1_batch_number) @@ -288,15 +143,15 @@ pub async fn prepare_job( l1_batch_number: L1BatchNumber, final_node_proof_job_ids: Vec<(u8, u32)>, object_store: &dyn ObjectStore, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); let recursion_tip_proofs = - load_proofs_for_recursion_tip(final_node_proof_job_ids, object_store).await?; + RecursionTipWitnessGenerator::get_artifacts(&final_node_proof_job_ids, object_store) + .await?; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] .observe(started_at.elapsed()); - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let node_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, diff --git a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs b/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs new file mode 100644 index 000000000000..b20a97641887 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs @@ -0,0 +1,94 @@ +use 
std::time::Instant; + +use async_trait::async_trait; +use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapper}; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::{ArtifactsManager, BlobUrls}, + scheduler::{SchedulerArtifacts, SchedulerWitnessGenerator}, +}; + +#[async_trait] +impl ArtifactsManager for SchedulerWitnessGenerator { + type InputMetadata = u32; + type InputArtifacts = FriProofWrapper; + type OutputArtifacts = SchedulerArtifacts; + + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result { + let artifacts = object_store.get(*metadata).await?; + + Ok(artifacts) + } + + async fn save_artifacts( + job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let key = FriCircuitKey { + block_number: L1BatchNumber(job_id), + circuit_id: 1, + sequence_number: 0, + depth: 0, + aggregation_round: AggregationRound::Scheduler, + }; + + let blob_url = object_store + .put( + key, + &CircuitWrapper::Recursive(artifacts.scheduler_circuit.clone()), + ) + .await + .unwrap(); + + BlobUrls::Url(blob_url) + } + + async fn update_database( + connection_pool: &ConnectionPool, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + _artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + let blob_url = match blob_urls { + BlobUrls::Url(url) => url, + _ => panic!("Unexpected blob urls type"), + }; + + let mut prover_connection = connection_pool.connection().await?; + let mut transaction = prover_connection.start_transaction().await?; + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(L1BatchNumber(job_id)) + .await; + transaction + .fri_prover_jobs_dal() + .insert_prover_job( + L1BatchNumber(job_id), + ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, + 0, + 0, + AggregationRound::Scheduler, + &blob_url, + false, + protocol_version_id, + ) + .await; + + transaction + .fri_witness_generator_dal() + .mark_scheduler_job_as_successful(L1BatchNumber(job_id), started_at.elapsed()) + .await; + + transaction.commit().await?; + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs new file mode 100644 index 000000000000..fe4f2db4090a --- /dev/null +++ b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs @@ -0,0 +1,129 @@ +use std::time::Instant; + +use anyhow::Context as _; +use async_trait::async_trait; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + scheduler::{ + prepare_job, SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, + }, +}; + +#[async_trait] +impl JobProcessor for SchedulerWitnessGenerator { + type Job = SchedulerWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = SchedulerArtifacts; + + const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result> { + let mut prover_connection = 
self.prover_connection_pool.connection().await?; + let pod_name = get_current_pod_name(); + let Some(l1_batch_number) = prover_connection + .fri_witness_generator_dal() + .get_next_scheduler_witness_job(self.protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + let recursion_tip_job_id = prover_connection + .fri_prover_jobs_dal() + .get_recursion_tip_proof_job_id(l1_batch_number) + .await + .context(format!( + "could not find recursion tip proof for l1 batch {}", + l1_batch_number + ))?; + + Ok(Some(( + l1_batch_number, + prepare_job( + l1_batch_number, + recursion_tip_job_id, + &*self.object_store, + self.keystore.clone(), + ) + .await + .context("prepare_job()")?, + ))) + } + + async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_scheduler_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: SchedulerWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle> { + tokio::task::spawn_blocking(move || { + let block_number = job.block_number; + let _span = tracing::info_span!("scheduler", %block_number).entered(); + Ok(Self::process_job_sync(job, started_at)) + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job_id) + )] + async fn save_result( + &self, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: SchedulerArtifacts, + ) -> anyhow::Result<()> { + let blob_save_started_at = Instant::now(); + + let blob_urls = + Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] + .observe(blob_save_started_at.elapsed()); + + Self::update_database( + &self.prover_connection_pool, + job_id.0, + started_at, + blob_urls, + artifacts, + ) + .await?; + + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for SchedulerWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_scheduler_witness_job_attempts(*job_id) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for SchedulerWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/scheduler.rs b/prover/crates/bin/witness_generator/src/scheduler/mod.rs similarity index 52% rename from prover/crates/bin/witness_generator/src/scheduler.rs rename to prover/crates/bin/witness_generator/src/scheduler/mod.rs index c389e037ffa6..10230b35c4f6 100644 --- a/prover/crates/bin/witness_generator/src/scheduler.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/mod.rs @@ -1,13 +1,12 @@ use std::{convert::TryInto, sync::Arc, time::Instant}; use anyhow::Context as _; -use async_trait::async_trait; use zkevm_test_harness::zkevm_circuits::recursion::{ leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS, }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ @@ -21,18 +20,22 @@ use zksync_prover_fri_types::{ 
recursion_layer_proof_config, zkevm_circuits::scheduler::{input::SchedulerCircuitInstanceWitness, SchedulerConfig}, }, - get_current_pod_name, - keys::FriCircuitKey, - CircuitWrapper, FriProofWrapper, + FriProofWrapper, }; -use zksync_queued_job_processor::JobProcessor; +use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; -use zksync_vk_setup_data_server_fri::{keystore::Keystore, utils::get_leaf_vk_params}; -use crate::{metrics::WITNESS_GENERATOR_METRICS, utils::SchedulerPartialInputWrapper}; +use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, + utils::SchedulerPartialInputWrapper, +}; + +mod artifacts; +mod job_processor; +#[derive(Clone)] pub struct SchedulerArtifacts { pub scheduler_circuit: ZkSyncRecursiveLayerCircuit, } @@ -57,7 +60,7 @@ pub struct SchedulerWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, } impl SchedulerWitnessGenerator { @@ -66,14 +69,14 @@ impl SchedulerWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, - setup_data_path, + keystore, } } @@ -121,143 +124,6 @@ impl SchedulerWitnessGenerator { } } -#[async_trait] -impl JobProcessor for SchedulerWitnessGenerator { - type Job = SchedulerWitnessGeneratorJob; - type JobId = L1BatchNumber; - type JobArtifacts = SchedulerArtifacts; - - const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(l1_batch_number) = prover_connection - .fri_witness_generator_dal() - .get_next_scheduler_witness_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - let recursion_tip_job_id = prover_connection - .fri_prover_jobs_dal() - .get_recursion_tip_proof_job_id(l1_batch_number) - .await - .context(format!( - "could not find recursion tip proof for l1 batch {}", - l1_batch_number - ))?; - - Ok(Some(( - l1_batch_number, - prepare_job( - l1_batch_number, - recursion_tip_job_id, - &*self.object_store, - self.setup_data_path.clone(), - ) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_scheduler_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: SchedulerWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || { - let block_number = job.block_number; - let _span = tracing::info_span!("scheduler", %block_number).entered(); - Ok(Self::process_job_sync(job, started_at)) - }) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job_id) - )] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - artifacts: SchedulerArtifacts, - ) -> anyhow::Result<()> { - let key = FriCircuitKey { - block_number: job_id, - circuit_id: 1, - sequence_number: 0, - depth: 
0, - aggregation_round: AggregationRound::Scheduler, - }; - let blob_save_started_at = Instant::now(); - let scheduler_circuit_blob_url = self - .object_store - .put(key, &CircuitWrapper::Recursive(artifacts.scheduler_circuit)) - .await?; - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] - .observe(blob_save_started_at.elapsed()); - - let mut prover_connection = self.prover_connection_pool.connection().await?; - let mut transaction = prover_connection.start_transaction().await?; - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(job_id) - .await; - transaction - .fri_prover_jobs_dal() - .insert_prover_job( - job_id, - ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, - 0, - 0, - AggregationRound::Scheduler, - &scheduler_circuit_blob_url, - false, - protocol_version_id, - ) - .await; - - transaction - .fri_witness_generator_dal() - .mark_scheduler_job_as_successful(job_id, started_at.elapsed()) - .await; - - transaction.commit().await?; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for SchedulerWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_scheduler_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for SchedulerWitnessGenerator") - } -} - #[tracing::instrument( skip_all, fields(l1_batch = %l1_batch_number) @@ -266,10 +132,11 @@ pub async fn prepare_job( l1_batch_number: L1BatchNumber, recursion_tip_job_id: u32, object_store: &dyn ObjectStore, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); - let wrapper = object_store.get(recursion_tip_job_id).await?; + let wrapper = + SchedulerWitnessGenerator::get_artifacts(&recursion_tip_job_id, object_store).await?; let recursion_tip_proof = match wrapper { FriProofWrapper::Base(_) => Err(anyhow::anyhow!( "Expected only recursive proofs for scheduler l1 batch {l1_batch_number}, got Base" @@ -280,7 +147,6 @@ pub async fn prepare_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let node_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, diff --git a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs index f8656ac90f44..3ea2b539773f 100644 --- a/prover/crates/bin/witness_generator/src/utils.rs +++ b/prover/crates/bin/witness_generator/src/utils.rs @@ -1,21 +1,14 @@ use std::{ - collections::HashMap, io::{BufWriter, Write as _}, sync::Arc, }; use circuit_definitions::{ - circuit_definitions::{ - base_layer::ZkSyncBaseLayerCircuit, - recursion_layer::{ZkSyncRecursionLayerStorageType, ZkSyncRecursionProof}, - }, + circuit_definitions::base_layer::ZkSyncBaseLayerCircuit, encodings::memory_query::MemoryQueueStateWitnesses, }; use once_cell::sync::Lazy; -use zkevm_test_harness::{ - boojum::field::goldilocks::GoldilocksField, empty_node_proof, - zkevm_circuits::scheduler::aux::BaseLayerCircuitType, -}; +use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; use zksync_multivm::utils::get_used_bootloader_memory_bytes; use zksync_object_store::{serialize_using_bincode, Bucket, 
ObjectStore, StoredObject}; use zksync_prover_fri_types::{ @@ -248,54 +241,3 @@ pub async fn load_proofs_for_job_ids( .map(|x| x.unwrap()) .collect() } - -/// Loads all proofs for a given recursion tip's job ids. -/// Note that recursion tip may not have proofs for some specific circuits (because the batch didn't contain them). -/// In this scenario, we still need to pass a proof, but it won't be taken into account during proving. -/// For this scenario, we use an empty_proof, but any proof would suffice. -#[tracing::instrument(skip_all)] -pub async fn load_proofs_for_recursion_tip( - job_ids: Vec<(u8, u32)>, - object_store: &dyn ObjectStore, -) -> anyhow::Result> { - let job_mapping: HashMap = job_ids - .into_iter() - .map(|(leaf_circuit_id, job_id)| { - ( - ZkSyncRecursionLayerStorageType::from_leaf_u8_to_basic_u8(leaf_circuit_id), - job_id, - ) - }) - .collect(); - - let empty_proof = empty_node_proof().into_inner(); - - let mut proofs = Vec::new(); - for circuit_id in BaseLayerCircuitType::as_iter_u8() { - if job_mapping.contains_key(&circuit_id) { - let fri_proof_wrapper = object_store - .get(*job_mapping.get(&circuit_id).unwrap()) - .await - .unwrap_or_else(|_| { - panic!( - "Failed to load proof with circuit_id {} for recursion tip", - circuit_id - ) - }); - match fri_proof_wrapper { - FriProofWrapper::Base(_) => { - return Err(anyhow::anyhow!( - "Expected only recursive proofs for recursion tip, got Base for circuit {}", - circuit_id - )); - } - FriProofWrapper::Recursive(recursive_proof) => { - proofs.push(recursive_proof.into_inner()); - } - } - } else { - proofs.push(empty_proof.clone()); - } - } - Ok(proofs) -} diff --git a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs index b034ab57d82c..3323e3c681e4 100644 --- a/prover/crates/bin/witness_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_generator/tests/basic_test.rs @@ -8,6 +8,7 @@ use zksync_prover_fri_types::{ CircuitWrapper, }; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_prover_keystore::keystore::Keystore; use zksync_types::{ basic_fri_types::AggregationRound, prover_dal::{LeafAggregationJobMetadata, NodeAggregationJobMetadata}, @@ -50,13 +51,10 @@ async fn test_leaf_witness_gen() { .await .unwrap(); - let job = prepare_leaf_aggregation_job( - leaf_aggregation_job_metadata, - &*object_store, - "crates/bin/vk_setup_data_generator/data".to_string(), - ) - .await - .unwrap(); + let keystore = Keystore::locate(); + let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store, keystore) + .await + .unwrap(); let artifacts = LeafAggregationWitnessGenerator::process_job_impl( job, @@ -143,13 +141,11 @@ async fn test_node_witness_gen() { prover_job_ids_for_proofs: vec![5211320], }; - let job = node_aggregation::prepare_job( - node_aggregation_job_metadata, - &*object_store, - "crates/bin/vk_setup_data_generator/data".to_string(), - ) - .await - .unwrap(); + let keystore = Keystore::locate(); + let job = + node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store, keystore) + .await + .unwrap(); let artifacts = NodeAggregationWitnessGenerator::process_job_impl( job, diff --git a/prover/crates/bin/witness_vector_generator/Cargo.toml b/prover/crates/bin/witness_vector_generator/Cargo.toml index 6a1d0af861c6..e8386c8090a3 100644 --- a/prover/crates/bin/witness_vector_generator/Cargo.toml +++ b/prover/crates/bin/witness_vector_generator/Cargo.toml @@ -22,7 +22,7 @@ 
zksync_prover_fri_types.workspace = true zksync_core_leftovers.workspace = true zksync_queued_job_processor.workspace = true zksync_vlog.workspace = true -vk_setup_data_generator_server_fri.workspace = true +zksync_prover_keystore.workspace = true anyhow.workspace = true tracing.workspace = true diff --git a/prover/crates/bin/witness_vector_generator/src/generator.rs b/prover/crates/bin/witness_vector_generator/src/generator.rs index 800931f5d7cc..6695905c07e3 100644 --- a/prover/crates/bin/witness_vector_generator/src/generator.rs +++ b/prover/crates/bin/witness_vector_generator/src/generator.rs @@ -17,12 +17,12 @@ use zksync_prover_fri_types::{ use zksync_prover_fri_utils::{ fetch_next_circuit, get_numeric_circuit_id, region_fetcher::Zone, socket_utils::send_assembly, }; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::CircuitIdRoundTuple, protocol_version::ProtocolSemanticVersion, prover_dal::GpuProverInstanceStatus, }; -use zksync_vk_setup_data_server_fri::keystore::Keystore; use crate::metrics::METRICS; @@ -34,7 +34,7 @@ pub struct WitnessVectorGenerator { config: FriWitnessVectorGeneratorConfig, protocol_version: ProtocolSemanticVersion, max_attempts: u32, - setup_data_path: Option, + keystore: Keystore, } impl WitnessVectorGenerator { @@ -47,7 +47,7 @@ impl WitnessVectorGenerator { config: FriWitnessVectorGeneratorConfig, protocol_version: ProtocolSemanticVersion, max_attempts: u32, - setup_data_path: Option, + keystore: Keystore, ) -> Self { Self { object_store, @@ -57,7 +57,7 @@ impl WitnessVectorGenerator { config, protocol_version, max_attempts, - setup_data_path, + keystore, } } @@ -127,16 +127,10 @@ impl JobProcessor for WitnessVectorGenerator { job: ProverJob, _started_at: Instant, ) -> JoinHandle> { - let setup_data_path = self.setup_data_path.clone(); - + let keystore = self.keystore.clone(); tokio::task::spawn_blocking(move || { let block_number = job.block_number; let _span = tracing::info_span!("witness_vector_generator", %block_number).entered(); - let keystore = if let Some(setup_data_path) = setup_data_path { - Keystore::new_with_setup_data_path(setup_data_path) - } else { - Keystore::default() - }; Self::generate_witness_vector(job, &keystore) }) } diff --git a/prover/crates/bin/witness_vector_generator/src/main.rs b/prover/crates/bin/witness_vector_generator/src/main.rs index 1d3113ebf1aa..17ac3bd6fc9f 100644 --- a/prover/crates/bin/witness_vector_generator/src/main.rs +++ b/prover/crates/bin/witness_vector_generator/src/main.rs @@ -12,6 +12,7 @@ use zksync_object_store::ObjectStoreFactory; use zksync_prover_dal::ConnectionPool; use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::RegionFetcher}; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; use zksync_utils::wait_for_tasks::ManagedTasks; use zksync_vlog::prometheus::PrometheusExporterConfig; @@ -87,6 +88,9 @@ async fn main() -> anyhow::Result<()> { .await .context("get_zone()")?; + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let (stop_sender, stop_receiver) = watch::channel(false); @@ -120,7 +124,7 @@ async fn main() -> anyhow::Result<()> { config.clone(), protocol_version, prover_config.max_attempts, - Some(prover_config.setup_data_path.clone()), + keystore.clone(), ); 
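Both binaries now share the same startup wiring for keys: resolve once, clone everywhere. A condensed sketch of the pattern (assuming `prover_config` is the `FriProverConfig` already loaded in each `main`; the comments are interpretive):

```rust
use zksync_prover_keystore::keystore::Keystore;

// `locate()` finds the small keys (verification keys, finalization hints)
// from the workspace layout, or from one of the known docker paths.
// `with_setup_path(Some(..))` redirects only the large setup keys to the
// directory configured via `FriProverConfig::setup_data_path`.
let keystore = Keystore::locate()
    .with_setup_path(Some(prover_config.setup_data_path.clone().into()));

// The keystore holds two `PathBuf`s, so each component takes a cheap
// `keystore.clone()` in place of the old `Option<String>` path and the
// per-job `Keystore::new_with_setup_data_path` / `Keystore::default()` branch.
let keystore_for_worker = keystore.clone();
```

One practical effect: in the containerized case, `locate()` panics at startup if none of the known data directories exists, so a misconfigured deployment fails immediately rather than inside the first spawned job.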
tasks.push(tokio::spawn( witness_vector_generator.run(stop_receiver.clone(), opt.n_iterations), diff --git a/prover/crates/bin/witness_vector_generator/tests/basic_test.rs b/prover/crates/bin/witness_vector_generator/tests/basic_test.rs index c6dfec5009f5..bcf01ddc4061 100644 --- a/prover/crates/bin/witness_vector_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_vector_generator/tests/basic_test.rs @@ -1,8 +1,8 @@ use std::fs; use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey}; +use zksync_prover_keystore::keystore::Keystore; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; -use zksync_vk_setup_data_server_fri::keystore::Keystore; use zksync_witness_vector_generator::generator::WitnessVectorGenerator; #[test] @@ -22,8 +22,7 @@ fn test_generate_witness_vector() { circuit_wrapper, setup_data_key: key, }; - let vector = - WitnessVectorGenerator::generate_witness_vector(job, &Keystore::default()).unwrap(); + let vector = WitnessVectorGenerator::generate_witness_vector(job, &Keystore::locate()).unwrap(); assert!(!vector.witness_vector.all_values.is_empty()); assert!(!vector.witness_vector.multiplicities.is_empty()); assert!(!vector.witness_vector.public_inputs_locations.is_empty()); diff --git a/prover/crates/lib/keystore/Cargo.toml b/prover/crates/lib/keystore/Cargo.toml new file mode 100644 index 000000000000..617030754f8b --- /dev/null +++ b/prover/crates/lib/keystore/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "zksync_prover_keystore" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + + +[dependencies] +zksync_basic_types.workspace = true +zksync_utils.workspace = true +zksync_prover_fri_types.workspace = true +zkevm_test_harness.workspace = true +circuit_definitions = { workspace = true, features = ["log_tracing"] } +shivini = { workspace = true, optional = true } + +anyhow.workspace = true +tracing.workspace = true +serde_json.workspace = true +serde = { workspace = true, features = ["derive"] } +bincode.workspace = true +once_cell.workspace = true +md5.workspace = true +sha3.workspace = true +hex.workspace = true + +[features] +default = [] +gpu = ["shivini"] diff --git a/prover/crates/lib/keystore/src/commitment_utils.rs b/prover/crates/lib/keystore/src/commitment_utils.rs new file mode 100644 index 000000000000..6973f86bf41e --- /dev/null +++ b/prover/crates/lib/keystore/src/commitment_utils.rs @@ -0,0 +1,78 @@ +use std::str::FromStr; + +use anyhow::Context as _; +use hex::ToHex; +use zkevm_test_harness::witness::recursive_aggregation::{ + compute_leaf_vks_and_params_commitment, compute_node_vk_commitment, +}; +use zksync_basic_types::H256; +use zksync_prover_fri_types::circuit_definitions::{ + boojum::field::goldilocks::GoldilocksField, + circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType, +}; + +use crate::{ + keystore::Keystore, + utils::{calculate_snark_vk_hash, get_leaf_vk_params}, + VkCommitments, +}; + +impl Keystore { + pub fn generate_commitments(&self) -> anyhow::Result { + let leaf_vk_params = get_leaf_vk_params(self).context("get_leaf_vk_params()")?; + let leaf_layer_params = leaf_vk_params + .iter() + .map(|el| el.1.clone()) + .collect::>() + .try_into() + .unwrap(); + let leaf_vk_commitment = compute_leaf_vks_and_params_commitment(leaf_layer_params); + + let node_vk = self + .load_recursive_layer_verification_key( + 
ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type(NodeLayerCircuit)")?; + let node_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + + let scheduler_vk = self + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type(SchedulerCircuit)")?; + let scheduler_vk_commitment = compute_node_vk_commitment(scheduler_vk.clone()); + + let hex_concatenator = |hex_array: [GoldilocksField; 4]| { + "0x".to_owned() + + &hex_array + .iter() + .map(|x| format!("{:016x}", x.0)) + .collect::>() + .join("") + }; + + let leaf_aggregation_commitment_hex = hex_concatenator(leaf_vk_commitment); + let node_aggregation_commitment_hex = hex_concatenator(node_vk_commitment); + let scheduler_commitment_hex = hex_concatenator(scheduler_vk_commitment); + let snark_vk_hash: String = calculate_snark_vk_hash(self)?.encode_hex(); + + let result = VkCommitments { + leaf: leaf_aggregation_commitment_hex, + node: node_aggregation_commitment_hex, + scheduler: scheduler_commitment_hex, + snark_wrapper: format!("0x{}", snark_vk_hash), + }; + tracing::info!("Commitments: {:?}", result); + Ok(result) + } + + pub fn verify_scheduler_vk_hash(&self, expected_hash: H256) -> anyhow::Result<()> { + let commitments = self + .generate_commitments() + .context("generate_commitments()")?; + let calculated_hash = + H256::from_str(&commitments.snark_wrapper).context("invalid SNARK wrapper VK")?; + anyhow::ensure!(expected_hash == calculated_hash, "Invalid SNARK wrapper VK hash. Calculated locally: {calculated_hash:?}, provided: {expected_hash:?}"); + Ok(()) + } +} diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs similarity index 87% rename from prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs rename to prover/crates/lib/keystore/src/keystore.rs index e886b5d1b0c0..28ce989287cc 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -16,14 +16,13 @@ use circuit_definitions::{ }; use serde::{Deserialize, Serialize}; use zkevm_test_harness::data_source::{in_memory_data_source::InMemoryDataSource, SetupDataSource}; -use zksync_config::configs::FriProverConfig; -use zksync_env_config::FromEnv; +use zksync_basic_types::basic_fri_types::AggregationRound; use zksync_prover_fri_types::ProverServiceDataKey; -use zksync_types::basic_fri_types::AggregationRound; +use zksync_utils::env::Workspace; #[cfg(feature = "gpu")] use crate::GoldilocksGpuProverSetupData; -use crate::{utils::core_workspace_dir_or_current_dir, GoldilocksProverSetupData, VkCommitments}; +use crate::{GoldilocksProverSetupData, VkCommitments}; pub enum ProverServiceDataType { VerificationKey, @@ -36,64 +35,65 @@ pub enum ProverServiceDataType { /// There are 2 types: /// - small verification, finalization keys (used only during verification) /// - large setup keys, used during proving. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Keystore { /// Directory to store all the small keys. basedir: PathBuf, /// Directory to store large setup keys. 
- setup_data_path: Option<String>, -} - -fn get_base_path() -> PathBuf { - let path = core_workspace_dir_or_current_dir(); - - let new_path = path.join("prover/crates/bin/vk_setup_data_generator_server_fri/data"); - if new_path.exists() { - return new_path; - } - - let mut components = path.components(); - components.next_back().unwrap(); - components - .as_path() - .join("prover/crates/bin/vk_setup_data_generator_server_fri/data") -} - -impl Default for Keystore { - fn default() -> Self { - Self { - basedir: get_base_path(), - setup_data_path: Some( - FriProverConfig::from_env() - .expect("FriProverConfig::from_env()") - .setup_data_path, - ), - } - } + setup_data_path: PathBuf, } impl Keystore { /// Base-dir is the location of smaller keys (like verification keys and finalization hints). /// Setup data path is used for the large setup keys. - pub fn new(basedir: PathBuf, setup_data_path: String) -> Self { + pub fn new(basedir: PathBuf) -> Self { Keystore { - basedir, - setup_data_path: Some(setup_data_path), + basedir: basedir.clone(), + setup_data_path: basedir, } } - pub fn new_with_optional_setup_path(basedir: PathBuf, setup_data_path: Option<String>) -> Self { - Keystore { - basedir, - setup_data_path, + /// Uses automatic detection of the base path, and assumes that setup keys + /// are stored in the same directory. + /// + /// The "base" path is considered to be equivalent to the `prover/data/keys` + /// directory in the repository. + pub fn locate() -> Self { + // There might be several cases: + // - We're running from the prover workspace. + // - We're running from the core workspace. + // - We're running the binary from a docker container. + let data_dir_path = match Workspace::locate() { + Workspace::None => { + // We're running a binary, likely in a docker container. + // Keys can be in one of a few paths. + // We want to be very conservative here, and check + // more locations than we likely need, so that we don't + // accidentally break something. + let paths = ["./prover/data", "./data", "/prover/data", "/data"]; + paths.iter().map(PathBuf::from).find(|path| path.exists()).unwrap_or_else(|| { + panic!("Failed to locate the prover data directory. Locations checked: {paths:?}") + }) + } + ws => { + // If we're running in the Cargo workspace, the data *must* be in `prover/data`. + ws.prover().join("data") + } + }; + let base_path = data_dir_path.join("keys"); + + Self { + basedir: base_path.clone(), + setup_data_path: base_path, + } } - pub fn new_with_setup_data_path(setup_data_path: String) -> Self { - Keystore { - basedir: get_base_path(), - setup_data_path: Some(setup_data_path), + /// Overrides the setup path, if one is provided.
+ pub fn with_setup_path(mut self, setup_data_path: Option<PathBuf>) -> Self { + if let Some(setup_data_path) = setup_data_path { + self.setup_data_path = setup_data_path; } + self } pub fn get_base_path(&self) -> &PathBuf { @@ -110,13 +110,9 @@ impl Keystore { ProverServiceDataType::VerificationKey => { self.basedir.join(format!("verification_{}_key.json", name)) } - ProverServiceDataType::SetupData => PathBuf::from(format!( - "{}/setup_{}_data.bin", - self.setup_data_path - .as_ref() - .expect("Setup data path not set"), - name - )), + ProverServiceDataType::SetupData => self + .setup_data_path + .join(format!("setup_{}_data.bin", name)), ProverServiceDataType::FinalizationHints => self .basedir .join(format!("finalization_hints_{}.bin", name)), @@ -465,6 +461,7 @@ impl Keystore { pub fn load_commitments(&self) -> anyhow::Result<VkCommitments> { Self::load_json_from_file(self.get_base_path().join("commitments.json")) } + pub fn save_commitments(&self, commitments: &VkCommitments) -> anyhow::Result<()> { Self::save_json_pretty(self.get_base_path().join("commitments.json"), &commitments) } diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/lib.rs b/prover/crates/lib/keystore/src/lib.rs similarity index 99% rename from prover/crates/bin/vk_setup_data_generator_server_fri/src/lib.rs rename to prover/crates/lib/keystore/src/lib.rs index 4b66df56f182..7e60e3fa29cd 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/lib.rs +++ b/prover/crates/lib/keystore/src/lib.rs @@ -26,7 +26,6 @@ pub mod commitment_utils; pub mod keystore; pub mod setup_data_generator; pub mod utils; -pub mod vk_commitment_helper; #[derive(Debug, Serialize, Deserialize)] #[serde( diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/prover/crates/lib/keystore/src/setup_data_generator.rs similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs rename to prover/crates/lib/keystore/src/setup_data_generator.rs diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs b/prover/crates/lib/keystore/src/utils.rs similarity index 88% rename from prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs rename to prover/crates/lib/keystore/src/utils.rs index 1ac6c4f4230d..10504292d64f 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs +++ b/prover/crates/lib/keystore/src/utils.rs @@ -1,5 +1,3 @@ -use std::path::PathBuf; - use anyhow::Context as _; use circuit_definitions::{ circuit_definitions::aux_layer::ZkSyncSnarkWrapperCircuit, @@ -13,6 +11,7 @@ use zkevm_test_harness::{ franklin_crypto::bellman::{CurveAffine, PrimeField, PrimeFieldRepr}, witness::recursive_aggregation::compute_leaf_params, }; +use zksync_basic_types::H256; use zksync_prover_fri_types::circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type, @@ -21,8 +20,6 @@ use zksync_prover_fri_types::circuit_definitions::{ scheduler::aux::BaseLayerCircuitType, }, }; -use zksync_types::H256; -use zksync_utils::locate_workspace; use crate::keystore::Keystore; @@ -115,29 +112,22 @@ pub fn calculate_snark_vk_hash(keystore: &Keystore) -> anyhow::Result<H256> { Ok(H256::from_slice(&computed_vk_hash)) } -/// Returns workspace of the core component, we assume that prover is one folder deeper.
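[Editor's note] A minimal usage sketch of the reworked `Keystore` API above, not part of the diff. The crate name `zksync_prover_keystore` and the `locate`/`with_setup_path`/`get_base_path` methods come from this change; the `/mnt/setup_keys` path is a hypothetical override.

```rust
use std::path::PathBuf;

use zksync_prover_keystore::keystore::Keystore;

fn main() {
    // Auto-detect `prover/data/keys`, whether running from the prover
    // workspace, the core workspace, or one of the hardcoded docker paths.
    let keystore = Keystore::locate()
        // Override only the large setup-key directory; `None` keeps the
        // auto-detected default. The path below is hypothetical.
        .with_setup_path(Some(PathBuf::from("/mnt/setup_keys")));
    println!("small keys dir: {:?}", keystore.get_base_path());
}
```

Unlike the removed `Default` impl, nothing here reads `FriProverConfig` from the environment; the setup path is an explicit, optional override.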
-/// Or fallback to current dir -pub fn core_workspace_dir_or_current_dir() -> PathBuf { - locate_workspace() - .map(|a| a.join("..")) - .unwrap_or_else(|| PathBuf::from(".")) -} - #[cfg(test)] mod tests { - use std::{path::PathBuf, str::FromStr}; + use std::str::FromStr; + + use zksync_utils::env::Workspace; use super::*; #[test] fn test_keyhash_generation() { - let mut path_to_input = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); - path_to_input.push("historical_data"); + let path_to_input = Workspace::locate().prover().join("data/historical_data"); for entry in std::fs::read_dir(path_to_input.clone()).unwrap().flatten() { if entry.metadata().unwrap().is_dir() { let basepath = path_to_input.join(entry.file_name()); - let keystore = Keystore::new_with_optional_setup_path(basepath.clone(), None); + let keystore = Keystore::new(basepath.clone()); let expected = H256::from_str(&keystore.load_commitments().unwrap().snark_wrapper).unwrap(); diff --git a/prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json b/prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json new file mode 100644 index 000000000000..ff5b1727e26a --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n snark_wrapper_vk_hash\n FROM\n prover_fri_protocol_versions\n WHERE\n id = $1\n AND protocol_version_patch = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snark_wrapper_vk_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json b/prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json deleted file mode 100644 index 73cd88457cd1..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n recursion_scheduler_level_vk_hash\n FROM\n prover_fri_protocol_versions\n WHERE\n id = $1\n AND protocol_version_patch = $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "recursion_scheduler_level_vk_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int4", - "Int4" - ] - }, - "nullable": [ - false - ] - }, - "hash": "4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json b/prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json deleted file mode 100644 index c985254f247e..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n prover_fri_protocol_versions (id, recursion_scheduler_level_vk_hash, created_at, protocol_version_patch)\n VALUES\n ($1, $2, NOW(), $3)\n ON CONFLICT (id, protocol_version_patch) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - 
"Int4", - "Bytea", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json b/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json deleted file mode 100644 index c713af9a210d..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number\n FROM\n prover_jobs_fri\n WHERE\n status <> 'skipped'\n AND status <> 'successful'\n AND aggregation_round = $1\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": "61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json b/prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json new file mode 100644 index 000000000000..b5025c6ed18d --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n snark_wrapper_vk_hash\n FROM\n prover_fri_protocol_versions\n ORDER BY\n id DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snark_wrapper_vk_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json b/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json new file mode 100644 index 000000000000..d8bd3223905c --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n prover_fri_protocol_versions (id, snark_wrapper_vk_hash, created_at, protocol_version_patch)\n VALUES\n ($1, $2, NOW(), $3)\n ON CONFLICT (id, protocol_version_patch) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Bytea", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json b/prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json deleted file mode 100644 index d699aae174c7..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n recursion_scheduler_level_vk_hash\n FROM\n prover_fri_protocol_versions\n ORDER BY\n id DESC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "recursion_scheduler_level_vk_hash", - "type_info": 
"Bytea" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac" -} diff --git a/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql new file mode 100644 index 000000000000..8d1681440769 --- /dev/null +++ b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql @@ -0,0 +1,3 @@ +UPDATE prover_fri_protocol_versions SET recursion_scheduler_level_vk_hash = snark_wrapper_vk_hash WHERE recursion_scheduler_level_vk_hash = ''::bytea; +ALTER TABLE prover_fri_protocol_versions DROP COLUMN snark_wrapper_vk_hash; +ALTER TABLE prover_fri_protocol_versions ALTER COLUMN recursion_scheduler_level_vk_hash DROP DEFAULT; diff --git a/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql new file mode 100644 index 000000000000..98eb1ee791c2 --- /dev/null +++ b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE prover_fri_protocol_versions ADD COLUMN snark_wrapper_vk_hash BYTEA NOT NULL DEFAULT ''::bytea; +ALTER TABLE prover_fri_protocol_versions ALTER COLUMN recursion_scheduler_level_vk_hash SET DEFAULT ''::bytea; +UPDATE prover_fri_protocol_versions SET snark_wrapper_vk_hash = recursion_scheduler_level_vk_hash; +-- Default was only needed to migrate old rows, we don't want this field to be forgotten by accident after migration. +ALTER TABLE prover_fri_protocol_versions ALTER COLUMN snark_wrapper_vk_hash DROP DEFAULT; + +-- Old column should be removed once the migration is on the mainnet. +COMMENT ON COLUMN prover_fri_protocol_versions.recursion_scheduler_level_vk_hash IS 'This column is deprecated and will be removed in the future. 
Use snark_wrapper_vk_hash instead.'; diff --git a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs index caf620882bc2..50df1046e67d 100644 --- a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs @@ -20,14 +20,14 @@ impl FriProtocolVersionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - prover_fri_protocol_versions (id, recursion_scheduler_level_vk_hash, created_at, protocol_version_patch) + prover_fri_protocol_versions (id, snark_wrapper_vk_hash, created_at, protocol_version_patch) VALUES ($1, $2, NOW(), $3) ON CONFLICT (id, protocol_version_patch) DO NOTHING "#, id.minor as i32, l1_verifier_config - .recursion_scheduler_level_vk_hash + .snark_wrapper_vk_hash .as_bytes(), id.patch.0 as i32 ) @@ -43,7 +43,7 @@ impl FriProtocolVersionsDal<'_, '_> { sqlx::query!( r#" SELECT - recursion_scheduler_level_vk_hash + snark_wrapper_vk_hash FROM prover_fri_protocol_versions WHERE @@ -57,9 +57,7 @@ impl FriProtocolVersionsDal<'_, '_> { .await .unwrap() .map(|row| L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &row.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&row.snark_wrapper_vk_hash), }) } @@ -67,7 +65,7 @@ impl FriProtocolVersionsDal<'_, '_> { let result = sqlx::query!( r#" SELECT - recursion_scheduler_level_vk_hash + snark_wrapper_vk_hash FROM prover_fri_protocol_versions ORDER BY @@ -80,9 +78,7 @@ impl FriProtocolVersionsDal<'_, '_> { .await?; Ok(L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &result.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&result.snark_wrapper_vk_hash), }) } diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index c2dadae58d0b..4e68154290da 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -476,33 +476,6 @@ impl FriProverDal<'_, '_> { } } - pub async fn min_unproved_l1_batch_number_for_aggregation_round( - &mut self, - aggregation_round: AggregationRound, - ) -> Option { - sqlx::query!( - r#" - SELECT - l1_batch_number - FROM - prover_jobs_fri - WHERE - status <> 'skipped' - AND status <> 'successful' - AND aggregation_round = $1 - ORDER BY - l1_batch_number ASC - LIMIT - 1 - "#, - aggregation_round as i16 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)) - } - pub async fn update_status(&mut self, id: u32, status: &str) { sqlx::query!( r#" diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 65d490ee4e08..9958527a98b0 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -927,12 +927,12 @@ impl FriWitnessGeneratorDal<'_, '_> { "#, AggregationRound::RecursionTip as i64, ) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| (row.l1_batch_number as u64)) - .collect() + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| (row.l1_batch_number as u64)) + .collect() } pub async fn requeue_stuck_leaf_jobs( diff --git a/prover/data/README.md b/prover/data/README.md new file mode 100644 index 000000000000..8391aa33ba5c --- /dev/null +++ 
b/prover/data/README.md @@ -0,0 +1,23 @@ +# Prover data directory + +This directory contains the data required to run provers. + +Currently, it has the following sub-directories: + +- [keys](./keys/): Data required for proof generation. This data is mapped to a single protocol version. +- [historical_data](./historical_data/): Descriptors for the protocol versions used in the past. + +## Keys directory + +The `keys` directory is used by various components in the prover subsystem, and it can generally contain two kinds of data: + +- Small static files, like commitments, finalization hints, or verification keys. +- Big generated blobs, like setup keys. + +Small static files are committed to the repository. Big files are expected to be downloaded or generated on demand. Two +important notes as of Sep 2024: + +- The path to setup keys can be overridden via configuration. +- The proof compressor requires a universal setup file, named, for example, `setup_2^24.bin` or `setup_2^26.bin`. It's + handled separately from the rest of the keys, e.g. it has separate configuration variables, and can naturally occur in + `$ZKSYNC_HOME/keys/setup` during development. diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/commitments.json b/prover/data/historical_data/0.24.0/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/commitments.json rename to prover/data/historical_data/0.24.0/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/snark_verification_scheduler_key.json b/prover/data/historical_data/0.24.0/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/snark_verification_scheduler_key.json rename to prover/data/historical_data/0.24.0/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/commitments.json b/prover/data/historical_data/0.24.1/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/commitments.json rename to prover/data/historical_data/0.24.1/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json b/prover/data/historical_data/0.24.1/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json rename to prover/data/historical_data/0.24.1/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/commitments.json b/prover/data/historical_data/18/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/commitments.json rename to prover/data/historical_data/18/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/snark_verification_scheduler_key.json b/prover/data/historical_data/18/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/snark_verification_scheduler_key.json rename to prover/data/historical_data/18/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/commitments.json
b/prover/data/historical_data/19/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/commitments.json rename to prover/data/historical_data/19/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/snark_verification_scheduler_key.json b/prover/data/historical_data/19/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/snark_verification_scheduler_key.json rename to prover/data/historical_data/19/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/commitments.json b/prover/data/historical_data/20/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/commitments.json rename to prover/data/historical_data/20/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/snark_verification_scheduler_key.json b/prover/data/historical_data/20/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/snark_verification_scheduler_key.json rename to prover/data/historical_data/20/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/commitments.json b/prover/data/historical_data/21/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/commitments.json rename to prover/data/historical_data/21/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/snark_verification_scheduler_key.json b/prover/data/historical_data/21/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/snark_verification_scheduler_key.json rename to prover/data/historical_data/21/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/commitments.json b/prover/data/historical_data/22/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/commitments.json rename to prover/data/historical_data/22/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/snark_verification_scheduler_key.json b/prover/data/historical_data/22/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/snark_verification_scheduler_key.json rename to prover/data/historical_data/22/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/commitments.json b/prover/data/historical_data/23/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/commitments.json rename to prover/data/historical_data/23/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/snark_verification_scheduler_key.json b/prover/data/historical_data/23/snark_verification_scheduler_key.json similarity index 100% rename from 
prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/snark_verification_scheduler_key.json rename to prover/data/historical_data/23/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/README.md b/prover/data/historical_data/README.md similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/README.md rename to prover/data/historical_data/README.md diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/commitments.json b/prover/data/keys/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/commitments.json rename to prover/data/keys/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin b/prover/data/keys/finalization_hints_basic_1.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin rename to prover/data/keys/finalization_hints_basic_1.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin b/prover/data/keys/finalization_hints_basic_10.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin rename to prover/data/keys/finalization_hints_basic_10.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin b/prover/data/keys/finalization_hints_basic_11.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin rename to prover/data/keys/finalization_hints_basic_11.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin b/prover/data/keys/finalization_hints_basic_12.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin rename to prover/data/keys/finalization_hints_basic_12.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin b/prover/data/keys/finalization_hints_basic_13.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin rename to prover/data/keys/finalization_hints_basic_13.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_14.bin b/prover/data/keys/finalization_hints_basic_14.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_14.bin rename to prover/data/keys/finalization_hints_basic_14.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_15.bin b/prover/data/keys/finalization_hints_basic_15.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_15.bin rename to prover/data/keys/finalization_hints_basic_15.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin b/prover/data/keys/finalization_hints_basic_2.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin rename to prover/data/keys/finalization_hints_basic_2.bin diff --git 
a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_255.bin b/prover/data/keys/finalization_hints_basic_255.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_255.bin rename to prover/data/keys/finalization_hints_basic_255.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin b/prover/data/keys/finalization_hints_basic_3.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin rename to prover/data/keys/finalization_hints_basic_3.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin b/prover/data/keys/finalization_hints_basic_4.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin rename to prover/data/keys/finalization_hints_basic_4.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin b/prover/data/keys/finalization_hints_basic_5.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin rename to prover/data/keys/finalization_hints_basic_5.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin b/prover/data/keys/finalization_hints_basic_6.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin rename to prover/data/keys/finalization_hints_basic_6.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin b/prover/data/keys/finalization_hints_basic_7.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin rename to prover/data/keys/finalization_hints_basic_7.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin b/prover/data/keys/finalization_hints_basic_8.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin rename to prover/data/keys/finalization_hints_basic_8.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin b/prover/data/keys/finalization_hints_basic_9.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin rename to prover/data/keys/finalization_hints_basic_9.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin b/prover/data/keys/finalization_hints_leaf_10.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin rename to prover/data/keys/finalization_hints_leaf_10.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin b/prover/data/keys/finalization_hints_leaf_11.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin rename to prover/data/keys/finalization_hints_leaf_11.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin b/prover/data/keys/finalization_hints_leaf_12.bin similarity index 100% rename from 
prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin rename to prover/data/keys/finalization_hints_leaf_12.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin b/prover/data/keys/finalization_hints_leaf_13.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin rename to prover/data/keys/finalization_hints_leaf_13.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin b/prover/data/keys/finalization_hints_leaf_14.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin rename to prover/data/keys/finalization_hints_leaf_14.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin b/prover/data/keys/finalization_hints_leaf_15.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin rename to prover/data/keys/finalization_hints_leaf_15.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_16.bin b/prover/data/keys/finalization_hints_leaf_16.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_16.bin rename to prover/data/keys/finalization_hints_leaf_16.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_17.bin b/prover/data/keys/finalization_hints_leaf_17.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_17.bin rename to prover/data/keys/finalization_hints_leaf_17.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_18.bin b/prover/data/keys/finalization_hints_leaf_18.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_18.bin rename to prover/data/keys/finalization_hints_leaf_18.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin b/prover/data/keys/finalization_hints_leaf_3.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin rename to prover/data/keys/finalization_hints_leaf_3.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin b/prover/data/keys/finalization_hints_leaf_4.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin rename to prover/data/keys/finalization_hints_leaf_4.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin b/prover/data/keys/finalization_hints_leaf_5.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin rename to prover/data/keys/finalization_hints_leaf_5.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin b/prover/data/keys/finalization_hints_leaf_6.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin rename to prover/data/keys/finalization_hints_leaf_6.bin diff --git 
a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin b/prover/data/keys/finalization_hints_leaf_7.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin rename to prover/data/keys/finalization_hints_leaf_7.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin b/prover/data/keys/finalization_hints_leaf_8.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin rename to prover/data/keys/finalization_hints_leaf_8.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin b/prover/data/keys/finalization_hints_leaf_9.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin rename to prover/data/keys/finalization_hints_leaf_9.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin b/prover/data/keys/finalization_hints_node.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin rename to prover/data/keys/finalization_hints_node.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_recursion_tip.bin b/prover/data/keys/finalization_hints_recursion_tip.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_recursion_tip.bin rename to prover/data/keys/finalization_hints_recursion_tip.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin b/prover/data/keys/finalization_hints_scheduler.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin rename to prover/data/keys/finalization_hints_scheduler.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/snark_verification_scheduler_key.json b/prover/data/keys/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/snark_verification_scheduler_key.json rename to prover/data/keys/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json b/prover/data/keys/verification_basic_10_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json rename to prover/data/keys/verification_basic_10_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json b/prover/data/keys/verification_basic_11_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json rename to prover/data/keys/verification_basic_11_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json b/prover/data/keys/verification_basic_12_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json rename to prover/data/keys/verification_basic_12_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json b/prover/data/keys/verification_basic_13_key.json similarity 
index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json rename to prover/data/keys/verification_basic_13_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_14_key.json b/prover/data/keys/verification_basic_14_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_14_key.json rename to prover/data/keys/verification_basic_14_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_15_key.json b/prover/data/keys/verification_basic_15_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_15_key.json rename to prover/data/keys/verification_basic_15_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json b/prover/data/keys/verification_basic_1_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json rename to prover/data/keys/verification_basic_1_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_255_key.json b/prover/data/keys/verification_basic_255_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_255_key.json rename to prover/data/keys/verification_basic_255_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json b/prover/data/keys/verification_basic_2_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json rename to prover/data/keys/verification_basic_2_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json b/prover/data/keys/verification_basic_3_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json rename to prover/data/keys/verification_basic_3_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json b/prover/data/keys/verification_basic_4_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json rename to prover/data/keys/verification_basic_4_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json b/prover/data/keys/verification_basic_5_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json rename to prover/data/keys/verification_basic_5_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json b/prover/data/keys/verification_basic_6_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json rename to prover/data/keys/verification_basic_6_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json b/prover/data/keys/verification_basic_7_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json rename to prover/data/keys/verification_basic_7_key.json diff --git 
a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json b/prover/data/keys/verification_basic_8_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json rename to prover/data/keys/verification_basic_8_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json b/prover/data/keys/verification_basic_9_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json rename to prover/data/keys/verification_basic_9_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json b/prover/data/keys/verification_leaf_10_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json rename to prover/data/keys/verification_leaf_10_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json b/prover/data/keys/verification_leaf_11_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json rename to prover/data/keys/verification_leaf_11_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json b/prover/data/keys/verification_leaf_12_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json rename to prover/data/keys/verification_leaf_12_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json b/prover/data/keys/verification_leaf_13_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json rename to prover/data/keys/verification_leaf_13_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json b/prover/data/keys/verification_leaf_14_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json rename to prover/data/keys/verification_leaf_14_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json b/prover/data/keys/verification_leaf_15_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json rename to prover/data/keys/verification_leaf_15_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_16_key.json b/prover/data/keys/verification_leaf_16_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_16_key.json rename to prover/data/keys/verification_leaf_16_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_17_key.json b/prover/data/keys/verification_leaf_17_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_17_key.json rename to prover/data/keys/verification_leaf_17_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_18_key.json b/prover/data/keys/verification_leaf_18_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_18_key.json 
rename to prover/data/keys/verification_leaf_18_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json b/prover/data/keys/verification_leaf_3_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json rename to prover/data/keys/verification_leaf_3_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json b/prover/data/keys/verification_leaf_4_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json rename to prover/data/keys/verification_leaf_4_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json b/prover/data/keys/verification_leaf_5_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json rename to prover/data/keys/verification_leaf_5_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json b/prover/data/keys/verification_leaf_6_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json rename to prover/data/keys/verification_leaf_6_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json b/prover/data/keys/verification_leaf_7_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json rename to prover/data/keys/verification_leaf_7_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json b/prover/data/keys/verification_leaf_8_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json rename to prover/data/keys/verification_leaf_8_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json b/prover/data/keys/verification_leaf_9_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json rename to prover/data/keys/verification_leaf_9_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json b/prover/data/keys/verification_node_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json rename to prover/data/keys/verification_node_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_recursion_tip_key.json b/prover/data/keys/verification_recursion_tip_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_recursion_tip_key.json rename to prover/data/keys/verification_recursion_tip_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json b/prover/data/keys/verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json rename to prover/data/keys/verification_scheduler_key.json diff --git a/prover/docs/05_proving_batch.md b/prover/docs/05_proving_batch.md index 441a8225f866..e09a44cb0ff7 100644 --- a/prover/docs/05_proving_batch.md +++ b/prover/docs/05_proving_batch.md @@ -72,13 +72,13 @@ 
input file, called `witness_inputs_.bin` generated by different core comp batch, that was already proven. Example: ```shell - curl -H "Content-Type: application/json" -X POST {address}/proof_generation_data -d 'null' + wget --content-disposition {address}/proof_generation_data ``` or ```shell - curl -H "Content-Type: application/json" -X POST {address}/proof_generation_data -d '1000' + wget --content-disposition {address}/proof_generation_data/{l1_batch_number} ``` ### Preparing database @@ -140,6 +140,12 @@ And you are good to go! The prover subsystem will prove the batch and you can ch Now, assuming the proof is already generated, you can verify using `ExternalProofIntegrationAPI`. Usually proof is stored in GCS bucket(for which you can use the same steps as for getting the witness inputs data [here](#getting-data-needed-for-proving), but locally you can find it in `/artifacts/proofs_fri` directory). Now, simply -send the data to the endpoint `{address}/verify_batch/{batch_number}`. Note, that you need to pass the generated proof -as serialized JSON data when calling the endpoint. API will respond with status 200 if the proof is valid and with the -error message otherwise. +send the data to the endpoint `{address}/verify_proof/{l1_batch_number}`. + +Example: + +```shell +curl -v -F proof=@{path_to_proof_binary} {address_of_API}/verify_proof/{l1_batch_number} +``` + +The API will respond with status 200 if the proof is valid, and with an error message otherwise. diff --git a/renovate.json b/renovate.json index 055bc3425806..eeccfee848dc 100644 --- a/renovate.json +++ b/renovate.json @@ -1,11 +1,5 @@ { - "enabled": false, - "extends": [ - "config:base", - "helpers:pinGitHubActionDigests" - ], - "enabledManagers": [ - "github-actions" - ], + "extends": ["config:base", "schedule:earlyMondays", "helpers:pinGitHubActionDigests"], + "enabledManagers": ["github-actions"], "prCreation": "immediate" } diff --git a/yarn.lock b/yarn.lock index 173a06e631f6..f400104b9c20 100644 --- a/yarn.lock +++ b/yarn.lock @@ -9776,7 +9776,7 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" -"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: +"string-width-cjs@npm:string-width@^4.2.0": version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -9793,6 +9793,15 @@ string-width@^2.1.0, string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" +string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + string-width@^5.0.1, string-width@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" @@ -9859,7 +9868,7 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: +"strip-ansi-cjs@npm:strip-ansi@^6.0.1": version "6.0.1" resolved
"https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -9880,6 +9889,13 @@ strip-ansi@^5.1.0: dependencies: ansi-regex "^4.1.0" +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + strip-ansi@^7.0.1: version "7.1.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" @@ -9990,7 +10006,7 @@ synckit@^0.8.6: fast-glob "^3.3.2" hardhat "=2.22.2" preprocess "^3.2.0" - zksync-ethers "https://github.com/zksync-sdk/zksync-ethers#ethers-v5-feat/bridgehub" + zksync-ethers "^5.9.0" table-layout@^1.0.2: version "1.0.2" @@ -10725,7 +10741,16 @@ workerpool@6.2.1: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== @@ -10879,17 +10904,18 @@ zksync-ethers@5.8.0-beta.5: dependencies: ethers "~5.7.0" +zksync-ethers@^5.9.0: + version "5.9.2" + resolved "https://registry.yarnpkg.com/zksync-ethers/-/zksync-ethers-5.9.2.tgz#1c5f34cb25ac0b040fd1a6118f2ba1c2c3bda090" + integrity sha512-Y2Mx6ovvxO6UdC2dePLguVzvNToOY8iLWeq5ne+jgGSJxAi/f4He/NF6FNsf6x1aWX0o8dy4Df8RcOQXAkj5qw== + dependencies: + ethers "~5.7.0" + zksync-ethers@^6.9.0: version "6.9.0" resolved "https://registry.yarnpkg.com/zksync-ethers/-/zksync-ethers-6.9.0.tgz#efaff1d59e2cff837eeda84c4ba59fdca4972a91" integrity sha512-2CppwvLHtz689L7E9EhevbFtsqVukKC/lVicwdeUS2yqV46ET4iBR11rYdEfGW2oEo1h6yJuuwIBDFm2SybkIA== -"zksync-ethers@https://github.com/zksync-sdk/zksync-ethers#ethers-v5-feat/bridgehub": - version "5.1.0" - resolved "https://github.com/zksync-sdk/zksync-ethers#28ccbe7d67b170c202b17475e06a82002e6e3acc" - dependencies: - ethers "~5.7.0" - zksync-web3@^0.15.4: version "0.15.5" resolved "https://registry.yarnpkg.com/zksync-web3/-/zksync-web3-0.15.5.tgz#aabe379464963ab573e15948660a709f409b5316" diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 868c9d614fc3..bcbd5b232320 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -409,6 +409,18 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blst" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4378725facc195f1a538864863f6de233b500a8862747e7f165078a419d5e874" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + [[package]] name = "bs58" version = "0.5.1" @@ -631,7 +643,7 @@ dependencies = [ 
"hmac", "once_cell", "pbkdf2 0.12.2", - "rand", + "rand 0.8.5", "sha2", "thiserror", ] @@ -709,7 +721,7 @@ dependencies = [ "clap", "common", "ethers", - "rand", + "rand 0.8.5", "serde", "serde_json", "strum", @@ -857,7 +869,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -869,7 +881,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -903,6 +915,33 @@ dependencies = [ "cipher", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "darling" version = "0.13.4" @@ -1120,6 +1159,31 @@ dependencies = [ "spki 0.7.3", ] +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8 0.10.2", + "signature 2.2.0", +] + +[[package]] +name = "ed25519-dalek" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2", + "subtle", + "zeroize", +] + [[package]] name = "either" version = "1.13.0" @@ -1143,7 +1207,7 @@ dependencies = [ "generic-array", "group 0.12.1", "pkcs8 0.9.0", - "rand_core", + "rand_core 0.6.4", "sec1 0.3.0", "subtle", "zeroize", @@ -1162,7 +1226,7 @@ dependencies = [ "generic-array", "group 0.13.0", "pkcs8 0.10.2", - "rand_core", + "rand_core 0.6.4", "sec1 0.7.3", "subtle", "zeroize", @@ -1212,7 +1276,7 @@ dependencies = [ "hex", "k256 0.13.3", "log", - "rand", + "rand 0.8.5", "rlp", "serde", "sha3", @@ -1267,7 +1331,7 @@ dependencies = [ "hex", "hmac", "pbkdf2 0.11.0", - "rand", + "rand 0.8.5", "scrypt", "serde", "serde_json", @@ -1430,7 +1494,7 @@ dependencies = [ "num_enum 0.7.2", "once_cell", "open-fastrlp", - "rand", + "rand 0.8.5", "rlp", "serde", "serde_json", @@ -1535,7 +1599,7 @@ dependencies = [ "elliptic-curve 0.13.8", "eth-keystore", "ethers-core", - "rand", + "rand 0.8.5", "sha2", "thiserror", "tracing", @@ -1606,7 +1670,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -1616,10 +1680,28 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] +[[package]] +name = "ff_ce" +version = "0.14.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" +dependencies = [ + "byteorder", + "hex", + "rand 0.4.6", + "serde", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "findshlibs" version = "0.10.2" @@ -1639,7 +1721,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -1711,6 +1793,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + [[package]] name = "funty" version = "2.0.0" @@ -1899,7 +1987,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -1910,7 +1998,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2740,7 +2828,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] @@ -2823,7 +2911,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand", + "rand 0.8.5", "smallvec", "zeroize", ] @@ -2986,9 +3074,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.64" +version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -3018,9 +3106,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.102" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" dependencies = [ "cc", "libc", @@ -3119,7 +3207,7 @@ dependencies = [ "once_cell", "opentelemetry", "percent-encoding", - "rand", + "rand 0.8.5", "serde_json", "thiserror", "tokio", @@ -3220,7 +3308,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3319,7 +3407,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" dependencies = [ "phf_shared 0.11.2", - "rand", + "rand 0.8.5", ] [[package]] @@ -3530,7 +3618,7 @@ dependencies = [ "bitflags 2.6.0", "lazy_static", "num-traits", - "rand", + "rand 0.8.5", "rand_chacha", "rand_xorshift", "regex-syntax 0.8.4", @@ -3680,6 +3768,19 @@ version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +dependencies = [ + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi", +] + [[package]] name = "rand" version = "0.8.5" @@ -3688,7 +3789,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -3698,9 +3799,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", ] +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + [[package]] name = "rand_core" version = "0.6.4" @@ -3716,7 +3832,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -3739,6 +3855,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "redox_syscall" version = "0.4.1" @@ -3995,7 +4120,7 @@ dependencies = [ "num-traits", "pkcs1", "pkcs8 0.10.2", - "rand_core", + "rand_core 0.6.4", "signature 2.2.0", "spki 0.7.3", "subtle", @@ -4349,7 +4474,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "901f761681f97db3db836ef9e094acdd8756c40215326c194201941947164ef1" dependencies = [ "once_cell", - "rand", + "rand 0.8.5", "sentry-types", "serde", "serde_json", @@ -4396,7 +4521,7 @@ checksum = "da956cca56e0101998c8688bc65ce1a96f00673a0e58e663664023d4c7911e82" dependencies = [ "debugid", "hex", - "rand", + "rand 0.8.5", "serde", "serde_json", "thiserror", @@ -4586,7 +4711,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -4596,7 +4721,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -4840,7 +4965,7 @@ dependencies = [ "memchr", "once_cell", "percent-encoding", - "rand", + "rand 0.8.5", "rsa", "serde", "sha1", @@ -4879,7 +5004,7 @@ dependencies = [ "md-5", "memchr", "once_cell", - "rand", + "rand 0.8.5", "serde", "serde_json", "sha2", @@ -5145,6 +5270,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "time" version = "0.3.36" @@ -5397,7 +5531,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -5540,7 +5674,7 @@ dependencies = [ "http 0.2.12", "httparse", "log", - "rand", + "rand 0.8.5", "rustls 0.21.12", "sha1", "thiserror", @@ -6278,6 +6412,7 @@ dependencies = [ "human-panic", "lazy_static", "rand", + "secrecy", "serde", "serde_json", "serde_yaml", @@ -6291,6 +6426,8 @@ dependencies = [ "xshell", "zksync_basic_types", "zksync_config", + "zksync_consensus_crypto", + "zksync_consensus_roles", ] [[package]] @@ -6302,12 +6439,15 @@ dependencies = [ "clap-markdown", "common", "config", + "ethers", "futures", "human-panic", "serde", "serde_json", + "serde_yaml", "strum", "tokio", + "types", "url", "xshell", ] @@ -6347,14 +6487,14 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" dependencies = [ "anyhow", "once_cell", "pin-project", - "rand", + "rand 0.8.5", "sha3", "thiserror", "time", @@ -6369,7 +6509,7 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", - "rand", + "rand 0.8.5", "secrecy", "serde", "url", @@ -6379,14 +6519,59 @@ dependencies = [ "zksync_crypto_primitives", ] +[[package]] +name = "zksync_consensus_crypto" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" +dependencies = [ + "anyhow", + "blst", + "ed25519-dalek", + "elliptic-curve 0.13.8", + "ff_ce", + "hex", + "k256 0.13.3", + "num-bigint", + "num-traits", + "rand 0.4.6", + "rand 0.8.5", + "sha3", + "thiserror", + "tracing", + "zeroize", +] + +[[package]] +name = "zksync_consensus_roles" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" +dependencies = [ + "anyhow", + "bit-vec", + "hex", + "num-bigint", + "prost 0.12.6", + "rand 0.8.5", + "serde", + "thiserror", + "tracing", + "zksync_concurrency", + "zksync_consensus_crypto", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", +] + [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" dependencies = [ "anyhow", - "rand", + "rand 0.8.5", "thiserror", "zksync_concurrency", ] @@ -6411,7 +6596,7 @@ dependencies = [ "anyhow", "blake2", "hex", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde_json", @@ -6432,9 +6617,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" dependencies = [ "anyhow", "bit-vec", @@ -6442,7 +6627,7 @@ dependencies = [ "prost 0.12.6", "prost-reflect", "quick-protobuf", - "rand", + "rand 
0.8.5", "serde", "serde_json", "serde_yaml", @@ -6453,9 +6638,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" dependencies = [ "anyhow", "heck", @@ -6475,7 +6660,7 @@ dependencies = [ "anyhow", "hex", "prost 0.12.6", - "rand", + "rand 0.8.5", "secrecy", "serde_json", "serde_yaml", @@ -6537,7 +6722,6 @@ dependencies = [ "bigdecimal", "futures", "hex", - "itertools 0.10.5", "num", "once_cell", "reqwest 0.12.5", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 4a08776558ed..33309872ea3b 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -30,7 +30,9 @@ types = { path = "crates/types" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_protobuf = "=0.1.0-rc.11" +zksync_consensus_roles = "=0.1.1" +zksync_consensus_crypto = "=0.1.1" +zksync_protobuf = "=0.1.1" # External dependencies anyhow = "1.0.82" @@ -59,3 +61,4 @@ toml = "0.8.12" url = { version = "2.5.0", features = ["serde"] } xshell = "0.2.6" clap-markdown = "0.1.4" +secrecy = "0.8.0" diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index debbb511df3f..a3b44fa98b32 100644 --- a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -20,7 +20,7 @@ Install `zk_inception` from Git: cargo install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor --force ``` -Or manually build from a local copy of the [ZkSync](https://github.com/matter-labs/zksync-era/) repository: +Or manually build from a local copy of the [ZKsync](https://github.com/matter-labs/zksync-era/) repository: ```bash ./bin/zkt @@ -37,7 +37,7 @@ Foundry is used for deploying smart contracts. Pass flags for Foundry integratio ZK Stack allows you to create a new ecosystem or connect to an existing one. An ecosystem includes components like the BridgeHub, shared bridges, and state transition managers. -[Learn more](https://docs.zksync.io/zk-stack/components/shared-bridges.html). +[Learn more](https://docs.zksync.io/zk-stack/components/shared-bridges). #### Global Config @@ -247,6 +247,53 @@ Run the external node: zk_inception en run ``` +### Portal + +Once you have at least one chain initialized, you can run the [portal](https://github.com/matter-labs/dapp-portal) - a +web-app to bridge tokens between L1 and L2 and more: + +```bash +zk_inception portal +``` + +This command will start the dockerized portal app using configuration from `apps/portal.config.json` file inside your +ecosystem directory. You can edit this file to configure the portal app if needed. By default, portal starts on +`http://localhost:3030`, you can configure the port in `apps.yaml` file. + +### Explorer + +For better understanding of the blockchain data, you can use the +[explorer](https://github.com/matter-labs/block-explorer) - a web-app to view and inspect transactions, blocks, +contracts and more. + +First, each chain should be initialized: + +```bash +zk_inception explorer init +``` + +This command creates a database to store explorer data and generatesdocker compose file with explorer services +(`explorer-docker-compose.yml`). 
+ +Next, for each chain you want to have an explorer for, you need to start its backend services: + +```bash +zk_inception explorer backend --chain <chain_name> +``` + +This command uses the previously created docker compose file to start the services (api, data fetcher, worker) required for +the explorer. + +Finally, you can run the explorer app: + +```bash +zk_inception explorer run +``` + +This command will start the dockerized explorer app using the configuration from the `apps/explorer.config.json` file inside +your ecosystem directory. You can edit this file to configure the app if needed. By default, the explorer starts on +`http://localhost:3010`; the port can be configured in the `apps.yaml` file. + ### Update To update your node: @@ -260,7 +307,7 @@ needed. ## ZK Supervisor -Tools for developing zkSync. +Tools for developing ZKsync. ### Database @@ -296,7 +343,7 @@ Possible commands: ### Tests -Run zkSync tests: +Run ZKsync tests: ```bash zk_supervisor test ``` @@ -320,6 +367,14 @@ Create a snapshot of the current chain: zks snapshot create ``` +### Contracts + +Build contracts: + +```bash +zks contracts +``` + ### Format Format code: @@ -342,7 +397,7 @@ Lint code: zks lint ``` -By default, this command runs the linter on all files. To target specific file types, use the `--extension` option. +By default, this command runs the linter on all files. To target specific file types, use the `--target` option. Supported extensions include: - `rs`: Rust files. @@ -350,3 +405,4 @@ Supported extensions include: - `sol`: Solidity files. - `js`: JavaScript files. - `ts`: TypeScript files. +- `contracts`: files in the `contracts` directory. diff --git a/zk_toolbox/crates/common/src/docker.rs b/zk_toolbox/crates/common/src/docker.rs index 0ca31383f9cc..a5731808814f 100644 --- a/zk_toolbox/crates/common/src/docker.rs +++ b/zk_toolbox/crates/common/src/docker.rs @@ -1,26 +1,33 @@ -use std::collections::HashMap; - +use url::Url; use xshell::{cmd, Shell}; use crate::cmd::Cmd; -pub fn up(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { - Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} up -d")).run()?) +pub fn up(shell: &Shell, docker_compose_file: &str, detach: bool) -> anyhow::Result<()> { + let args = if detach { vec!["-d"] } else { vec![] }; + let mut cmd = Cmd::new(cmd!( + shell, + "docker compose -f {docker_compose_file} up {args...}" + )); + cmd = if !detach { cmd.with_force_run() } else { cmd }; + Ok(cmd.run()?) } pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()?) } -pub fn run( - shell: &Shell, - docker_image: &str, - docker_args: HashMap<String, String>, -) -> anyhow::Result<()> { - let mut args = vec![]; - for (key, value) in docker_args.iter() { - args.push(key); - args.push(value); +pub fn run(shell: &Shell, docker_image: &str, docker_args: Vec<String>) -> anyhow::Result<()> { + Ok(Cmd::new(cmd!(shell, "docker run {docker_args...} {docker_image}")).run()?) +} + +pub fn adjust_localhost_for_docker(mut url: Url) -> anyhow::Result<Url> { + if let Some(host) = url.host_str() { + if host == "localhost" || host == "127.0.0.1" { + url.set_host(Some("host.docker.internal"))?; + } + } else { + anyhow::bail!("Failed to parse: no host"); } - Ok(Cmd::new(cmd!(shell, "docker run {args...} {docker_image}")).run()?) + Ok(url) }
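A quick sketch of how the new `adjust_localhost_for_docker` helper behaves (standalone, hypothetical usage of the `common` crate above):

```rust
use common::docker::adjust_localhost_for_docker;
use url::Url;

fn main() -> anyhow::Result<()> {
    // Containers cannot reach the host's `localhost`, so it is rewritten.
    let local = Url::parse("postgres://postgres@localhost:5432/explorer")?;
    assert_eq!(
        adjust_localhost_for_docker(local)?.host_str(),
        Some("host.docker.internal")
    );

    // Non-local hosts pass through unchanged.
    let remote = Url::parse("http://db.internal:5432/")?;
    assert_eq!(adjust_localhost_for_docker(remote)?.host_str(), Some("db.internal"));
    Ok(())
}
```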
diff --git a/zk_toolbox/crates/common/src/external_node.rs b/zk_toolbox/crates/common/src/external_node.rs new file mode 100644 index 000000000000..8a5cbc3cd14c --- /dev/null +++ b/zk_toolbox/crates/common/src/external_node.rs @@ -0,0 +1,33 @@ +use anyhow::Context; +use xshell::{cmd, Shell}; + +use crate::cmd::Cmd; + +pub fn run( + shell: &Shell, + code_path: &str, + config_path: &str, + secrets_path: &str, + en_config_path: &str, + consensus_args: Vec<String>, + additional_args: Vec<String>, +) -> anyhow::Result<()> { + let _dir = shell.push_dir(code_path); + + let cmd = Cmd::new( + cmd!( + shell, + "cargo run --release --bin zksync_external_node -- + --config-path {config_path} + --secrets-path {secrets_path} + --external-node-config-path {en_config_path} + " + ) + .args(consensus_args) + .args(additional_args) + .env_remove("RUSTUP_TOOLCHAIN"), + ) + .with_force_run(); + + cmd.run().context("Failed to run external node") +} diff --git a/zk_toolbox/crates/common/src/lib.rs b/zk_toolbox/crates/common/src/lib.rs index 2ab5c5f10e13..7be4af740700 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zk_toolbox/crates/common/src/lib.rs @@ -7,12 +7,16 @@ pub mod config; pub mod db; pub mod docker; pub mod ethereum; +pub mod external_node; pub mod files; pub mod forge; pub mod git; pub mod server; pub mod wallets; -pub use prerequisites::{check_general_prerequisites, check_prover_prequisites}; +pub use prerequisites::{ + check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITE, GPU_PREREQUISITES, + PROVER_CLI_PREREQUISITE, WGET_PREREQUISITE, +}; pub use prompt::{init_prompt_theme, Prompt, PromptConfirm, PromptSelect}; pub use term::{error, logger, spinner}; diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zk_toolbox/crates/common/src/prerequisites.rs index 6c437302470d..665096d8486e 100644 --- a/zk_toolbox/crates/common/src/prerequisites.rs +++ b/zk_toolbox/crates/common/src/prerequisites.rs @@ -30,15 +30,7 @@ const DOCKER_COMPOSE_PREREQUISITE: Prerequisite = Prerequisite { download_link: "https://docs.docker.com/compose/install/", }; -const PROVER_PREREQUISITES: [Prerequisite; 5] = [ - Prerequisite { - name: "gcloud", - download_link: "https://cloud.google.com/sdk/docs/install", - }, - Prerequisite { - name: "wget", - download_link: "https://www.gnu.org/software/wget/", - }, +pub const GPU_PREREQUISITES: [Prerequisite; 3] = [ Prerequisite { name: "cmake", download_link: "https://cmake.org/download/", @@ -53,7 +45,23 @@ const PROVER_PREREQUISITES: [Prerequisite; 5] = [ }, // CUDA GPU driver ]; -struct Prerequisite { +pub const WGET_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { + name: "wget", + download_link: "https://www.gnu.org/software/wget/", +}]; + +pub const GCLOUD_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { + name: "gcloud", + download_link: "https://cloud.google.com/sdk/docs/install", +}]; + +pub const PROVER_CLI_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { + name: "prover_cli", + download_link: "https://github.com/matter-labs/zksync-era/tree/main/prover/crates/bin/prover_cli", +}]; + +pub struct Prerequisite { name: &'static str, download_link: &'static str, } @@ -62,11 +70,7 @@ pub fn check_general_prerequisites(shell: &Shell) { check_prerequisites(shell, &PREREQUISITES, true); } -pub fn check_prover_prequisites(shell: &Shell) { - check_prerequisites(shell, &PROVER_PREREQUISITES, false); -} - -fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) { +pub fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) { let mut missing_prerequisites = vec![]; for prerequisite in prerequisites {
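With the monolithic prover prerequisite list split into focused constants, callers compose exactly the checks they need; a hypothetical caller (not part of this diff) could look like:

```rust
use common::{check_prerequisites, GCLOUD_PREREQUISITE, GPU_PREREQUISITES};
use xshell::Shell;

fn main() -> anyhow::Result<()> {
    let shell = Shell::new()?;
    // `false` skips the docker compose check, matching the old prover behavior.
    check_prerequisites(&shell, &GPU_PREREQUISITES, false);
    check_prerequisites(&shell, &GCLOUD_PREREQUISITE, false);
    Ok(())
}
```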
diff --git a/zk_toolbox/crates/common/src/server.rs b/zk_toolbox/crates/common/src/server.rs index c65c8d4c13e2..40da1cf80325 100644 --- a/zk_toolbox/crates/common/src/server.rs +++ b/zk_toolbox/crates/common/src/server.rs @@ -9,6 +9,7 @@ use crate::cmd::Cmd; pub struct Server { components: Option<Vec<String>>, code_path: PathBuf, + uring: bool, } /// Possible server modes. @@ -20,10 +21,11 @@ pub enum ServerMode { impl Server { /// Creates a new instance of the server. - pub fn new(components: Option<Vec<String>>, code_path: PathBuf) -> Self { + pub fn new(components: Option<Vec<String>>, code_path: PathBuf, uring: bool) -> Self { Self { components, code_path, + uring, } } @@ -52,10 +54,12 @@ impl Server { additional_args.push("--genesis".to_string()); } + let uring = self.uring.then_some("--features=rocksdb/io-uring"); + let mut cmd = Cmd::new( cmd!( shell, - "cargo run --release --bin zksync_server -- + "cargo run --release --bin zksync_server {uring...} -- --genesis-path {genesis_path} --wallets-path {wallets_path} --config-path {general_path} diff --git a/zk_toolbox/crates/config/src/apps.rs b/zk_toolbox/crates/config/src/apps.rs new file mode 100644 index 000000000000..697b35b0851b --- /dev/null +++ b/zk_toolbox/crates/config/src/apps.rs @@ -0,0 +1,59 @@ +use std::path::{Path, PathBuf}; + +use serde::{Deserialize, Serialize}; +use xshell::Shell; + +use crate::{ + consts::{APPS_CONFIG_FILE, DEFAULT_EXPLORER_PORT, DEFAULT_PORTAL_PORT, LOCAL_CONFIGS_PATH}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, +}; + +/// Ecosystem level configuration for the apps (portal and explorer). +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct AppsEcosystemConfig { + pub portal: AppEcosystemConfig, + pub explorer: AppEcosystemConfig, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct AppEcosystemConfig { + pub http_port: u16, +} + +impl ZkToolboxConfig for AppsEcosystemConfig {} +impl FileConfigWithDefaultName for AppsEcosystemConfig { + const FILE_NAME: &'static str = APPS_CONFIG_FILE; +} + +impl AppsEcosystemConfig { + pub fn get_config_path(ecosystem_base_path: &Path) -> PathBuf { + ecosystem_base_path .join(LOCAL_CONFIGS_PATH) .join(APPS_CONFIG_FILE) + } + + pub fn read_or_create_default(shell: &Shell) -> anyhow::Result<Self> { + let config_path = Self::get_config_path(&shell.current_dir()); + match Self::read(shell, &config_path) { Ok(config) => Ok(config), Err(_) => { let config = Self::default(); config.save(shell, &config_path)?; Ok(config) } } + } +} + +impl Default for AppsEcosystemConfig { + fn default() -> Self { + AppsEcosystemConfig { + portal: AppEcosystemConfig { http_port: DEFAULT_PORTAL_PORT, }, + explorer: AppEcosystemConfig { http_port: DEFAULT_EXPLORER_PORT, }, + } + } +}
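The read-or-create pattern above means the first app-related command materializes `configs/apps.yaml` with defaults; a minimal sketch of a caller, assuming it runs from the ecosystem root:

```rust
use config::AppsEcosystemConfig;
use xshell::Shell;

fn main() -> anyhow::Result<()> {
    let shell = Shell::new()?;
    // Reads configs/apps.yaml, or writes it with the defaults (portal 3030, explorer 3010).
    let apps = AppsEcosystemConfig::read_or_create_default(&shell)?;
    println!("portal: {}, explorer: {}", apps.portal.http_port, apps.explorer.http_port);
    Ok(())
}
```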
diff --git a/zk_toolbox/crates/config/src/chain.rs b/zk_toolbox/crates/config/src/chain.rs index d8cc53954352..affc8ccc770c 100644 --- a/zk_toolbox/crates/config/src/chain.rs +++ b/zk_toolbox/crates/config/src/chain.rs @@ -34,9 +34,12 @@ pub struct ChainConfigInternal { pub configs: PathBuf, pub rocks_db_path: PathBuf, pub external_node_config_path: Option<PathBuf>, + pub artifacts_path: Option<PathBuf>, pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, pub base_token: BaseToken, pub wallet_creation: WalletCreation, + #[serde(skip_serializing_if = "Option::is_none")] + pub legacy_bridge: Option<bool>, } /// Chain configuration file. This file is created in the chain @@ -50,12 +53,14 @@ pub struct ChainConfig { pub l1_network: L1Network, pub link_to_code: PathBuf, pub rocks_db_path: PathBuf, + pub artifacts: PathBuf, pub configs: PathBuf, pub external_node_config_path: Option<PathBuf>, pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, pub base_token: BaseToken, pub wallet_creation: WalletCreation, pub shell: OnceCell<Shell>, + pub legacy_bridge: Option<bool>, } impl Serialize for ChainConfig { @@ -147,9 +152,11 @@ impl ChainConfig { configs: self.configs.clone(), rocks_db_path: self.rocks_db_path.clone(), external_node_config_path: self.external_node_config_path.clone(), + artifacts_path: Some(self.artifacts.clone()), l1_batch_commit_data_generator_mode: self.l1_batch_commit_data_generator_mode, base_token: self.base_token.clone(), wallet_creation: self.wallet_creation, + legacy_bridge: self.legacy_bridge, } } } diff --git a/zk_toolbox/crates/config/src/consensus_config.rs b/zk_toolbox/crates/config/src/consensus_config.rs new file mode 100644 index 000000000000..0bb4750d1fc0 --- /dev/null +++ b/zk_toolbox/crates/config/src/consensus_config.rs @@ -0,0 +1,18 @@ +use zksync_config::configs::consensus::ConsensusConfig; +use zksync_protobuf_config::encode_yaml_repr; + +use crate::{ + traits::{FileConfigWithDefaultName, SaveConfig}, + CONSENSUS_CONFIG_FILE, +}; + +impl FileConfigWithDefaultName for ConsensusConfig { + const FILE_NAME: &'static str = CONSENSUS_CONFIG_FILE; +} + +impl SaveConfig for ConsensusConfig { + fn save(&self, shell: &xshell::Shell, path: impl AsRef<std::path::Path>) -> anyhow::Result<()> { + let bytes = encode_yaml_repr::<zksync_protobuf_config::proto::consensus::ConsensusConfig>(self)?; + Ok(shell.write_file(path.as_ref(), bytes)?) + } +} diff --git a/zk_toolbox/crates/config/src/consensus_secrets.rs b/zk_toolbox/crates/config/src/consensus_secrets.rs new file mode 100644 index 000000000000..0e5c4592d2fc --- /dev/null +++ b/zk_toolbox/crates/config/src/consensus_secrets.rs @@ -0,0 +1,14 @@ +use std::path::Path; + +use xshell::Shell; +use zksync_config::configs::consensus::ConsensusSecrets; +use zksync_protobuf_config::decode_yaml_repr; + +use crate::traits::ReadConfig; + +impl ReadConfig for ConsensusSecrets { + fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { + let path = shell.current_dir().join(path); + decode_yaml_repr::<zksync_protobuf_config::proto::secrets::ConsensusSecrets>(&path, false) + } +}
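These impls hook the consensus types into the toolbox's generic config traits, so they load and save like any other config file; an illustrative read (the path is an example, not from the diff):

```rust
use config::traits::ReadConfig;
use xshell::Shell;
use zksync_config::configs::consensus::ConsensusSecrets;

fn main() -> anyhow::Result<()> {
    let shell = Shell::new()?;
    // Decoded via the protobuf YAML representation, as implemented above.
    let _secrets = ConsensusSecrets::read(&shell, "chains/era/configs/consensus_secrets.yaml")?;
    Ok(())
}
```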
diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs index 4de534b816d5..80b204cc6191 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zk_toolbox/crates/config/src/consts.rs @@ -11,6 +11,8 @@ pub const GENESIS_FILE: &str = "genesis.yaml"; // Name of external node specific config pub const EN_CONFIG_FILE: &str = "external_node.yaml"; +// Name of consensus config +pub const CONSENSUS_CONFIG_FILE: &str = "consensus_config.yaml"; pub(crate) const ERC20_CONFIGS_FILE: &str = "erc20.yaml"; /// Name of the initial deployments config file pub(crate) const INITIAL_DEPLOYMENT_FILE: &str = "initial_deployments.yaml"; @@ -30,11 +32,45 @@ pub const ERA_OBSERVABILITY_COMPOSE_FILE: &str = "era-observability/docker-compo pub const ERA_OBSERBAVILITY_DIR: &str = "era-observability"; /// Era observability repo link pub const ERA_OBSERBAVILITY_GIT_REPO: &str = "https://github.com/matter-labs/era-observability"; +pub(crate) const LOCAL_APPS_PATH: &str = "apps/"; +pub(crate) const LOCAL_CHAINS_PATH: &str = "chains/"; pub(crate) const LOCAL_CONFIGS_PATH: &str = "configs/"; +pub(crate) const LOCAL_GENERATED_PATH: &str = ".generated/"; pub(crate) const LOCAL_DB_PATH: &str = "db/"; +pub(crate) const LOCAL_ARTIFACTS_PATH: &str = "artifacts/"; -/// Name of portal config file -pub const PORTAL_CONFIG_FILE: &str = "portal.config.js"; +/// Name of apps config file +pub const APPS_CONFIG_FILE: &str = "apps.yaml"; +/// Name of portal runtime config file (auto-generated) +pub const PORTAL_JS_CONFIG_FILE: &str = "portal.config.js"; +/// Name of portal config JSON file +pub const PORTAL_CONFIG_FILE: &str = "portal.config.json"; +/// Name of explorer runtime config file (auto-generated) +pub const EXPLORER_JS_CONFIG_FILE: &str = "explorer.config.js"; +/// Name of explorer config JSON file +pub const EXPLORER_CONFIG_FILE: &str = "explorer.config.json"; +/// Name of explorer docker compose file +pub const EXPLORER_DOCKER_COMPOSE_FILE: &str = "explorer-docker-compose.yml"; + +/// Default port for the explorer app +pub const DEFAULT_EXPLORER_PORT: u16 = 3010; +/// Default port for the portal app +pub const DEFAULT_PORTAL_PORT: u16 = 3030; +/// Default port for the explorer worker service +pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; +/// Default port for the explorer API service +pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; +/// Default port for the explorer data fetcher service +pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; +/// Default port for consensus service +pub const DEFAULT_CONSENSUS_PORT: u16 = 3054; + +pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api"; +pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; +pub const EXPLORER_WORKER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-worker"; + +/// Interval (in milliseconds) for polling new batches to process in explorer app +pub const EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL: u64 = 1000; /// Path to ecosystem contacts pub(crate) const ECOSYSTEM_PATH: &str = "etc/env/ecosystems"; diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zk_toolbox/crates/config/src/contracts.rs index 6042c4bea088..0d4b1c7b1f81 100644 --- a/zk_toolbox/crates/config/src/contracts.rs +++ b/zk_toolbox/crates/config/src/contracts.rs @@ -5,7 +5,9 @@ use crate::{ consts::CONTRACTS_FILE, forge_interface::{ deploy_ecosystem::output::DeployL1Output, - deploy_l2_contracts::output::{DefaultL2UpgradeOutput, InitializeBridgeOutput}, + deploy_l2_contracts::output::{ + ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput, + }, register_chain::output::RegisterChainOutput, }, traits::{FileConfigWithDefaultName, ZkToolboxConfig}, @@ -67,6 +69,7 @@ impl ContractsConfig { self.ecosystem_contracts .diamond_cut_data .clone_from(&deploy_l1_output.contracts_config.diamond_cut_data); + self.l1.chain_admin_addr = deploy_l1_output.deployed_addresses.chain_admin; } pub fn set_chain_contracts(&mut self, register_chain_output: &RegisterChainOutput) { @@ -84,6 +87,14 @@ impl ContractsConfig { Ok(()) } + pub fn set_consensus_registry( + &mut self, + consensus_registry_output: &ConsensusRegistryOutput, + ) -> anyhow::Result<()> { + self.l2.consensus_registry = Some(consensus_registry_output.consensus_registry_proxy); + Ok(()) + } + pub fn set_default_l2_upgrade( &mut self, default_upgrade_output: &DefaultL2UpgradeOutput, @@ -140,4 +151,5 @@ pub struct L1Contracts { pub struct L2Contracts { pub testnet_paymaster_addr: Address, pub default_l2_upgrader: Address, + pub consensus_registry: Option<Address>, }
diff --git a/zk_toolbox/crates/config/src/docker_compose.rs b/zk_toolbox/crates/config/src/docker_compose.rs new file mode 100644 index 000000000000..05c6e73eaea5 --- /dev/null +++ b/zk_toolbox/crates/config/src/docker_compose.rs @@ -0,0 +1,43 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; + +use crate::traits::ZkToolboxConfig; + +#[derive(Debug, Default, Serialize, Deserialize, Clone)] +pub struct DockerComposeConfig { + pub services: HashMap<String, DockerComposeService>, + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option<String>, + #[serde(flatten)] + pub other: serde_json::Value, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DockerComposeService { + pub image: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub platform: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub ports: Option<Vec<String>>, + #[serde(skip_serializing_if = "Option::is_none")] + pub environment: Option<HashMap<String, String>>, + #[serde(skip_serializing_if = "Option::is_none")] + pub volumes: Option<Vec<String>>, + #[serde(skip_serializing_if = "Option::is_none")] + pub depends_on: Option<Vec<String>>, + #[serde(skip_serializing_if = "Option::is_none")] + pub restart: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub extra_hosts: Option<Vec<String>>, + #[serde(flatten)] + pub other: serde_json::Value, +} + +impl ZkToolboxConfig for DockerComposeConfig {} + +impl DockerComposeConfig { + pub fn add_service(&mut self, name: &str, service: DockerComposeService) { + self.services.insert(name.to_string(), service); + } +}
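`DockerComposeConfig` is a thin serde model of a compose file; building a single service by hand (illustrative values) shows the shape it serializes:

```rust
use config::docker_compose::{DockerComposeConfig, DockerComposeService};

fn main() {
    let mut compose = DockerComposeConfig::default();
    compose.add_service(
        "api",
        DockerComposeService {
            image: "matterlabs/block-explorer-api".to_string(),
            platform: Some("linux/amd64".to_string()),
            ports: Some(vec!["3002:3002".to_string()]),
            environment: None,
            volumes: None,
            depends_on: None,
            restart: None,
            extra_hosts: None,
            other: serde_json::Value::Null,
        },
    );
    assert!(compose.services.contains_key("api"));
}
```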
diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs index 8ce4b733c26f..7ff65d4612df 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zk_toolbox/crates/config/src/ecosystem.rs @@ -3,7 +3,7 @@ use std::{ path::{Path, PathBuf}, }; -use common::logger; +use common::{config::global_config, logger}; use serde::{Deserialize, Serialize, Serializer}; use thiserror::Error; use types::{L1Network, ProverMode, WalletCreation}; @@ -14,7 +14,7 @@ use crate::{ consts::{ CONFIGS_PATH, CONFIG_NAME, CONTRACTS_FILE, ECOSYSTEM_PATH, ERA_CHAIN_ID, ERC20_CONFIGS_FILE, ERC20_DEPLOYMENT_FILE, INITIAL_DEPLOYMENT_FILE, L1_CONTRACTS_FOUNDRY, - LOCAL_DB_PATH, WALLETS_FILE, + LOCAL_ARTIFACTS_PATH, LOCAL_DB_PATH, WALLETS_FILE, }, create_localhost_wallets, forge_interface::deploy_ecosystem::{ @@ -139,6 +139,13 @@ impl EcosystemConfig { Ok(ecosystem) } + pub fn current_chain(&self) -> &str { + global_config() + .chain_name + .as_deref() + .unwrap_or(self.default_chain.as_ref()) + } + pub fn load_chain(&self, name: Option<String>) -> Option<ChainConfig> { let name = name.unwrap_or(self.default_chain.clone()); self.load_chain_inner(&name) @@ -146,7 +153,7 @@ fn load_chain_inner(&self, name: &str) -> Option<ChainConfig> { let path = self.chains.join(name).join(CONFIG_NAME); - let config = ChainConfigInternal::read(self.get_shell(), path).ok()?; + let config = ChainConfigInternal::read(self.get_shell(), path.clone()).ok()?; Some(ChainConfig { id: config.id, @@ -162,6 +169,11 @@ rocks_db_path: config.rocks_db_path, wallet_creation: config.wallet_creation, shell: self.get_shell().clone().into(), + // It's required for backward compatibility + artifacts: config .artifacts_path .unwrap_or_else(|| self.get_chain_artifacts_path(name)), + legacy_bridge: config.legacy_bridge, }) } @@ -228,6 +240,10 @@ impl EcosystemConfig { self.chains.join(chain_name).join(LOCAL_DB_PATH) } + pub fn get_chain_artifacts_path(&self, chain_name: &str) -> PathBuf { + self.chains.join(chain_name).join(LOCAL_ARTIFACTS_PATH) + } + fn get_internal(&self) -> EcosystemConfigInternal { let bellman_cuda_dir = self .bellman_cuda_dir @@ -275,3 +291,10 @@ fn find_file(shell: &Shell, path_buf: PathBuf, file_name: &str) -> Result + +pub fn get_link_to_prover(config: &EcosystemConfig) -> PathBuf { + let link_to_code = config.link_to_code.clone(); + let mut link_to_prover = link_to_code.into_os_string(); + link_to_prover.push("/prover"); + link_to_prover.into() +}
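`current_chain` centralizes the global `--chain` flag fallback; a hypothetical command handler would resolve its chain like this:

```rust
use anyhow::Context;
use config::EcosystemConfig;
use xshell::Shell;

fn main() -> anyhow::Result<()> {
    let shell = Shell::new()?;
    let ecosystem = EcosystemConfig::from_file(&shell)?;
    // Global --chain argument if given, otherwise the ecosystem's default chain.
    let chain = ecosystem
        .load_chain(Some(ecosystem.current_chain().to_string()))
        .context("chain not found")?;
    println!("artifacts path: {:?}", chain.artifacts);
    Ok(())
}
```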
diff --git a/zk_toolbox/crates/config/src/explorer.rs b/zk_toolbox/crates/config/src/explorer.rs new file mode 100644 index 000000000000..ee7a59e5105c --- /dev/null +++ b/zk_toolbox/crates/config/src/explorer.rs @@ -0,0 +1,147 @@ +use std::path::{Path, PathBuf}; + +use serde::{Deserialize, Serialize}; +use xshell::Shell; + +use crate::{ + consts::{ + EXPLORER_CONFIG_FILE, EXPLORER_JS_CONFIG_FILE, LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, + LOCAL_GENERATED_PATH, + }, + traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, +}; + +/// Explorer JSON configuration file. This file contains configuration for the explorer app. +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ExplorerConfig { + pub app_environment: String, + pub environment_config: EnvironmentConfig, + #[serde(flatten)] + pub other: serde_json::Value, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct EnvironmentConfig { + pub networks: Vec<ExplorerChainConfig>, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ExplorerChainConfig { + pub name: String, // L2 network chain name (the one used during the chain initialization) + pub l2_network_name: String, // How the network is displayed in the app dropdown + pub l2_chain_id: u64, + pub rpc_url: String, // L2 RPC URL + pub api_url: String, // L2 API URL + pub base_token_address: String, // L2 base token address (currently always 0x800A) + pub hostnames: Vec<String>, // Custom domain to use when switched to this chain in the app + pub icon: String, // Icon to show in the explorer dropdown + pub maintenance: bool, // Maintenance warning + pub published: bool, // If false, the chain will not be shown in the explorer dropdown + #[serde(skip_serializing_if = "Option::is_none")] + pub bridge_url: Option<String>, // Link to the portal bridge + #[serde(skip_serializing_if = "Option::is_none")] + pub l1_explorer_url: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub verification_api_url: Option<String>, // L2 verification API URL + #[serde(flatten)] + pub other: serde_json::Value, +} + +impl ExplorerConfig { + /// Returns the path to the explorer configuration file. + pub fn get_config_path(ecosystem_base_path: &Path) -> PathBuf { + ecosystem_base_path .join(LOCAL_CONFIGS_PATH) .join(LOCAL_APPS_PATH) .join(EXPLORER_CONFIG_FILE) + } + + /// Reads the existing config or creates a default one if it doesn't exist. + pub fn read_or_create_default(shell: &Shell) -> anyhow::Result<Self> { + let config_path = Self::get_config_path(&shell.current_dir()); + match Self::read(shell, &config_path) { + Ok(config) => Ok(config), + Err(_) => { + let config = Self::default(); + config.save(shell, &config_path)?; + Ok(config) + } + } + } + + /// Adds or updates a given chain configuration. + pub fn add_chain_config(&mut self, config: &ExplorerChainConfig) { + // Replace if config with the same network name already exists + if let Some(index) = self .environment_config .networks .iter() .position(|c| c.name == config.name) { + self.environment_config.networks[index] = config.clone(); + return; + } + self.environment_config.networks.push(config.clone()); + } + + /// Retains only the chains whose names are present in the given vector. + pub fn filter(&mut self, chain_names: &[String]) { + self.environment_config .networks .retain(|config| chain_names.contains(&config.name)); + } + + /// Hides all chains except those specified in the given vector. + pub fn hide_except(&mut self, chain_names: &[String]) { + for network in &mut self.environment_config.networks { + network.published = chain_names.contains(&network.name); + } + } + + /// Checks if a chain with the given name exists in the configuration. + pub fn contains(&self, chain_name: &String) -> bool { + self.environment_config .networks .iter() .any(|config| &config.name == chain_name) + } + + pub fn is_empty(&self) -> bool { + self.environment_config.networks.is_empty() + } + + pub fn save_as_js(&self, shell: &Shell) -> anyhow::Result<PathBuf> { + // The block-explorer-app is served as a pre-built static app in a Docker image. + // It uses a JavaScript file (config.js) that injects the configuration at runtime + // by overwriting the '##runtimeConfig' property of the window object. + // This file will be mounted to the Docker image when it runs. + let path = Self::get_generated_js_config_path(&shell.current_dir()); + let json = serde_json::to_string_pretty(&self)?; + let config_js_content = format!("window['##runtimeConfig'] = {};", json); + shell.write_file(path.clone(), config_js_content.as_bytes())?; + Ok(path) + } + + fn get_generated_js_config_path(ecosystem_base_path: &Path) -> PathBuf { + ecosystem_base_path .join(LOCAL_CONFIGS_PATH) .join(LOCAL_GENERATED_PATH) .join(EXPLORER_JS_CONFIG_FILE) + } +} + +impl Default for ExplorerConfig { + fn default() -> Self { + ExplorerConfig { + app_environment: "default".to_string(), + environment_config: EnvironmentConfig { networks: Vec::new(), }, + other: serde_json::Value::Null, + } + } +} + +impl ZkToolboxConfig for ExplorerConfig {}
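A sketch of the intended `ExplorerConfig` flow: register a chain, then publish only the chains being served (all field values here are illustrative):

```rust
use config::explorer::{ExplorerChainConfig, ExplorerConfig};

fn main() {
    let mut config = ExplorerConfig::default();
    config.add_chain_config(&ExplorerChainConfig {
        name: "era".to_string(),
        l2_network_name: "Era Local".to_string(),
        l2_chain_id: 271,
        rpc_url: "http://127.0.0.1:3050".to_string(),
        api_url: "http://127.0.0.1:3002".to_string(),
        base_token_address: "0x000000000000000000000000000000000000800A".to_string(),
        hostnames: Vec::new(),
        icon: "/images/icons/eth.svg".to_string(),
        maintenance: false,
        published: true,
        bridge_url: None,
        l1_explorer_url: None,
        verification_api_url: Some("http://127.0.0.1:3070".to_string()),
        other: serde_json::Value::Null,
    });
    // Hide every chain except the ones explicitly requested.
    config.hide_except(&["era".to_string()]);
    assert!(config.contains(&"era".to_string()));
}
```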
diff --git a/zk_toolbox/crates/config/src/explorer_compose.rs b/zk_toolbox/crates/config/src/explorer_compose.rs new file mode 100644 index 000000000000..ca9abc1e3e23 --- /dev/null +++ b/zk_toolbox/crates/config/src/explorer_compose.rs @@ -0,0 +1,214 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; + +use anyhow::Context; +use common::{db, docker::adjust_localhost_for_docker}; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::{ + consts::{ + DEFAULT_EXPLORER_API_PORT, DEFAULT_EXPLORER_DATA_FETCHER_PORT, + DEFAULT_EXPLORER_WORKER_PORT, EXPLORER_API_DOCKER_IMAGE, + EXPLORER_DATA_FETCHER_DOCKER_IMAGE, EXPLORER_DOCKER_COMPOSE_FILE, + EXPLORER_WORKER_DOCKER_IMAGE, LOCAL_CHAINS_PATH, LOCAL_CONFIGS_PATH, + }, + docker_compose::{DockerComposeConfig, DockerComposeService}, + traits::ZkToolboxConfig, + EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL, +}; + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ExplorerBackendPorts { + pub api_http_port: u16, + pub data_fetcher_http_port: u16, + pub worker_http_port: u16, +} + +impl ExplorerBackendPorts { + pub fn with_offset(&self, offset: u16) -> Self { + ExplorerBackendPorts { + api_http_port: self.api_http_port + offset, + data_fetcher_http_port: self.data_fetcher_http_port + offset, + worker_http_port: self.worker_http_port + offset, + } + } +} + +impl Default for ExplorerBackendPorts { + fn default() -> Self { + ExplorerBackendPorts { + api_http_port: DEFAULT_EXPLORER_API_PORT, + data_fetcher_http_port: DEFAULT_EXPLORER_DATA_FETCHER_PORT, + worker_http_port: DEFAULT_EXPLORER_WORKER_PORT, + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ExplorerBackendConfig { + pub database_url: Url, + pub ports: ExplorerBackendPorts, + pub batches_processing_polling_interval: u64, +} + +impl ExplorerBackendConfig { + pub fn new(database_url: Url, ports: &ExplorerBackendPorts) -> Self { + ExplorerBackendConfig { + database_url, + ports: ports.clone(), + batches_processing_polling_interval: EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL, + } + } +} + +/// Chain-level explorer backend docker compose file. +/// It contains configuration for api, data fetcher, and worker services. +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ExplorerBackendComposeConfig { + #[serde(flatten)] + pub docker_compose: DockerComposeConfig, +} + +impl ZkToolboxConfig for ExplorerBackendComposeConfig {} + +impl ExplorerBackendComposeConfig { + const API_NAME: &'static str = "api"; + const DATA_FETCHER_NAME: &'static str = "data-fetcher"; + const WORKER_NAME: &'static str = "worker"; + + pub fn new( + chain_name: &str, + l2_rpc_url: Url, + config: &ExplorerBackendConfig, + ) -> anyhow::Result<Self> { + let db_url = adjust_localhost_for_docker(config.database_url.clone())?; + let l2_rpc_url = adjust_localhost_for_docker(l2_rpc_url)?; + + let mut services: HashMap<String, DockerComposeService> = HashMap::new(); + services.insert( + Self::API_NAME.to_string(), + Self::create_api_service(config.ports.api_http_port, db_url.as_ref()), + ); + services.insert( + Self::DATA_FETCHER_NAME.to_string(), + Self::create_data_fetcher_service( + config.ports.data_fetcher_http_port, + l2_rpc_url.as_ref(), + ), + ); + + let worker = Self::create_worker_service( + config.ports.worker_http_port, + config.ports.data_fetcher_http_port, + l2_rpc_url.as_ref(), + &db_url, + config.batches_processing_polling_interval, + ) + .context("Failed to create worker service")?; + services.insert(Self::WORKER_NAME.to_string(), worker); + + Ok(Self { + docker_compose: DockerComposeConfig { + name: Some(format!("{chain_name}-explorer")), + services, + other: serde_json::Value::Null, + }, + }) + }
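`with_offset` exists so several chains on one host do not collide on backend ports; with the defaults (api 3002, data fetcher 3040, worker 3001):

```rust
use config::explorer_compose::ExplorerBackendPorts;

fn main() {
    // A second chain's backend, shifted by 100.
    let ports = ExplorerBackendPorts::default().with_offset(100);
    assert_eq!(ports.api_http_port, 3102);
    assert_eq!(ports.data_fetcher_http_port, 3140);
    assert_eq!(ports.worker_http_port, 3101);
}
```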
"verbose".to_string()), + ("NODE_ENV".to_string(), "development".to_string()), + ("BLOCKCHAIN_RPC_URL".to_string(), l2_rpc_url.to_string()), + ])), + extra_hosts: Some(vec!["host.docker.internal:host-gateway".to_string()]), + other: serde_json::Value::Null, + } + } + + fn create_worker_service( + port: u16, + data_fetcher_port: u16, + l2_rpc_url: &str, + db_url: &Url, + batches_processing_polling_interval: u64, + ) -> anyhow::Result { + let data_fetcher_url = format!("http://{}:{}", Self::DATA_FETCHER_NAME, data_fetcher_port); + + // Parse database URL + let db_config = db::DatabaseConfig::from_url(db_url)?; + let db_user = db_url.username().to_string(); + let db_password = db_url.password().unwrap_or(""); + let db_port = db_url.port().unwrap_or(5432); + let db_host = db_url + .host_str() + .context("Failed to parse database host")? + .to_string(); + + Ok(DockerComposeService { + image: EXPLORER_WORKER_DOCKER_IMAGE.to_string(), + platform: Some("linux/amd64".to_string()), + ports: None, + volumes: None, + depends_on: None, + restart: None, + environment: Some(HashMap::from([ + ("PORT".to_string(), port.to_string()), + ("LOG_LEVEL".to_string(), "verbose".to_string()), + ("NODE_ENV".to_string(), "development".to_string()), + ("DATABASE_HOST".to_string(), db_host.to_string()), + ("DATABASE_PORT".to_string(), db_port.to_string()), + ("DATABASE_USER".to_string(), db_user.to_string()), + ("DATABASE_PASSWORD".to_string(), db_password.to_string()), + ("DATABASE_NAME".to_string(), db_config.name.to_string()), + ("BLOCKCHAIN_RPC_URL".to_string(), l2_rpc_url.to_string()), + ("DATA_FETCHER_URL".to_string(), data_fetcher_url), + ( + "BATCHES_PROCESSING_POLLING_INTERVAL".to_string(), + batches_processing_polling_interval.to_string(), + ), + ])), + extra_hosts: Some(vec!["host.docker.internal:host-gateway".to_string()]), + other: serde_json::Value::Null, + }) + } + + pub fn get_config_path(ecosystem_base_path: &Path, chain_name: &str) -> PathBuf { + ecosystem_base_path + .join(LOCAL_CHAINS_PATH) + .join(chain_name) + .join(LOCAL_CONFIGS_PATH) + .join(EXPLORER_DOCKER_COMPOSE_FILE) + } +} diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs index bf9292e9ba30..7f35cf0357c2 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -44,6 +44,7 @@ pub struct DeployL1DeployedAddressesOutput { pub governance_addr: Address, pub transparent_proxy_admin_addr: Address, pub validator_timelock_addr: Address, + pub chain_admin: Address, pub bridgehub: L1BridgehubOutput, pub bridges: L1BridgesOutput, pub state_transition: L1StateTransitionOutput, diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs index f48fd0ba2b5e..b20b58f99c58 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs @@ -6,6 +6,8 @@ use crate::{traits::ZkToolboxConfig, ChainConfig}; impl ZkToolboxConfig for DeployL2ContractsInput {} +/// Fields corresponding to `contracts/l1-contracts/deploy-script-config-template/config-deploy-l2-config.toml` +/// which are read by `contracts/l1-contracts/deploy-scripts/DeployL2Contracts.sol`. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct DeployL2ContractsInput { pub era_chain_id: L2ChainId, @@ -14,6 +16,7 @@ pub struct DeployL2ContractsInput { pub bridgehub: Address, pub governance: Address, pub erc20_bridge: Address, + pub consensus_registry_owner: Address, } impl DeployL2ContractsInput { @@ -27,6 +30,7 @@ impl DeployL2ContractsInput { bridgehub: contracts.ecosystem_contracts.bridgehub_proxy_addr, governance: wallets.governor.address, erc20_bridge: contracts.bridges.erc20.l1_address, + consensus_registry_owner: wallets.governor.address, }) } } diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index 22f3dc9381b3..860e7e293f99 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize}; use crate::traits::ZkToolboxConfig; impl ZkToolboxConfig for InitializeBridgeOutput {} - impl ZkToolboxConfig for DefaultL2UpgradeOutput {} +impl ZkToolboxConfig for ConsensusRegistryOutput {} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { @@ -17,3 +17,9 @@ pub struct InitializeBridgeOutput { pub struct DefaultL2UpgradeOutput { pub l2_default_upgrader: Address, } + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConsensusRegistryOutput { + pub consensus_registry_implementation: Address, + pub consensus_registry_proxy: Address, +} diff --git a/zk_toolbox/crates/config/src/forge_interface/mod.rs b/zk_toolbox/crates/config/src/forge_interface/mod.rs index ea3d49c67ecb..c7033c45ed22 100644 --- a/zk_toolbox/crates/config/src/forge_interface/mod.rs +++ b/zk_toolbox/crates/config/src/forge_interface/mod.rs @@ -4,3 +4,4 @@ pub mod deploy_l2_contracts; pub mod paymaster; pub mod register_chain; pub mod script_params; +pub mod setup_legacy_bridge; diff --git a/zk_toolbox/crates/config/src/forge_interface/script_params.rs b/zk_toolbox/crates/config/src/forge_interface/script_params.rs index fb16aa97e6a8..e7e21ad132b8 100644 --- a/zk_toolbox/crates/config/src/forge_interface/script_params.rs +++ b/zk_toolbox/crates/config/src/forge_interface/script_params.rs @@ -61,3 +61,9 @@ pub const ACCEPT_GOVERNANCE_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams output: "script-out/output-accept-admin.toml", script_path: "deploy-scripts/AcceptAdmin.s.sol", }; + +pub const SETUP_LEGACY_BRIDGE: ForgeScriptParams = ForgeScriptParams { + input: "script-config/setup-legacy-bridge.toml", + output: "script-out/setup-legacy-bridge.toml", + script_path: "deploy-scripts/dev/SetupLegacyBridge.s.sol", +}; diff --git a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs b/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs new file mode 100644 index 000000000000..e8189c521fb3 --- /dev/null +++ b/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs @@ -0,0 +1,20 @@ +use serde::{Deserialize, Serialize}; +use zksync_basic_types::{Address, L2ChainId, H256}; + +use crate::traits::ZkToolboxConfig; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SetupLegacyBridgeInput { + pub bridgehub: Address, + pub diamond_proxy: Address, + pub shared_bridge_proxy: Address, + pub transparent_proxy_admin: Address, + pub erc20bridge_proxy: Address, + pub token_weth_address: Address, + pub chain_id: L2ChainId, + pub l2shared_bridge_address: 
Address, + pub create2factory_salt: H256, + pub create2factory_addr: Address, +} + +impl ZkToolboxConfig for SetupLegacyBridgeInput {} diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index 4dfc6c17470d..6498beb0f532 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -4,17 +4,39 @@ use anyhow::Context; use url::Url; use xshell::Shell; pub use zksync_config::configs::GeneralConfig; +use zksync_config::configs::{consensus::Host, object_store::ObjectStoreMode}; use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; use crate::{ consts::GENERAL_FILE, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig}, + traits::{ConfigWithL2RpcUrl, FileConfigWithDefaultName, ReadConfig, SaveConfig}, + DEFAULT_CONSENSUS_PORT, }; pub struct RocksDbs { pub state_keeper: PathBuf, pub merkle_tree: PathBuf, pub protective_reads: PathBuf, + pub basic_witness_input_producer: PathBuf, +} + +pub struct FileArtifacts { + pub public_object_store: PathBuf, + pub prover_object_store: PathBuf, + pub snapshot: PathBuf, + pub core_object_store: PathBuf, +} + +impl FileArtifacts { + /// Currently all artifacts are stored under one path, but we keep the option to update these paths later + pub fn new(path: PathBuf) -> Self { + Self { + public_object_store: path.clone(), + prover_object_store: path.clone(), + snapshot: path.clone(), + core_object_store: path.clone(), + } + } } pub fn set_rocks_db_config(config: &mut GeneralConfig, rocks_dbs: RocksDbs) -> anyhow::Result<()> { @@ -34,17 +56,79 @@ pub fn set_rocks_db_config(config: &mut GeneralConfig, rocks_dbs: RocksDbs) -> a .as_mut() .context("Protective reads config is not presented")? .db_path = rocks_dbs.protective_reads.to_str().unwrap().to_string(); + config + .basic_witness_input_producer_config + .as_mut() + .context("Basic witness input producer config is not presented")? + .db_path = rocks_dbs .basic_witness_input_producer .to_str() .unwrap() .to_string(); Ok(()) }
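`FileArtifacts` fans a single base path out to all four object stores; a caller only supplies the chain's artifacts directory (the crate-root re-export is assumed here):

```rust
use std::path::PathBuf;

use config::FileArtifacts; // assumed re-export; the type lives in the config crate's general module

fn main() {
    let artifacts = FileArtifacts::new(PathBuf::from("chains/era/artifacts"));
    // All four stores currently share the same base path.
    assert_eq!(artifacts.prover_object_store, artifacts.core_object_store);
    assert_eq!(artifacts.snapshot, artifacts.public_object_store);
}
```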
set_artifact_path { + ($config:expr, $name:ident, $value:expr) => { + $config + .as_mut() + .map(|a| set_artifact_path!(a.$name, $value)) + }; + + ($config:expr, $value:expr) => { + $config.as_mut().map(|a| { + if let ObjectStoreMode::FileBacked { + ref mut file_backed_base_path, + } = &mut a.mode + { + *file_backed_base_path = $value.to_str().unwrap().to_string() + } + }) + }; + } + + set_artifact_path!( + config.prover_config, + prover_object_store, + file_artifacts.prover_object_store + ); + set_artifact_path!( + config.prover_config, + public_object_store, + file_artifacts.public_object_store + ); + set_artifact_path!( + config.snapshot_creator, + object_store, + file_artifacts.snapshot + ); + set_artifact_path!( + config.snapshot_recovery, + object_store, + file_artifacts.snapshot + ); + + set_artifact_path!(config.core_object_store, file_artifacts.core_object_store); +} + pub fn ports_config(config: &GeneralConfig) -> Option { let api = config.api_config.as_ref()?; + let contract_verifier = config.contract_verifier.as_ref()?; + let consensus_port = if let Some(consensus_config) = config.clone().consensus_config { + consensus_config.server_addr.port() + } else { + DEFAULT_CONSENSUS_PORT + }; + Some(PortsConfig { web3_json_rpc_http_port: api.web3_json_rpc.http_port, web3_json_rpc_ws_port: api.web3_json_rpc.ws_port, healthcheck_port: api.healthcheck.port, merkle_tree_port: api.merkle_tree.port, prometheus_listener_port: api.prometheus.listener_port, + contract_verifier_port: contract_verifier.port, + consensus_port, }) } @@ -53,6 +137,19 @@ pub fn update_ports(config: &mut GeneralConfig, ports_config: &PortsConfig) -> a .api_config .as_mut() .context("Api config is not presented")?; + let contract_verifier = config + .contract_verifier + .as_mut() + .context("Contract Verifier config is not presented")?; + let prometheus = config + .prometheus_config + .as_mut() + .context("Prometheus config is not presented")?; + if let Some(consensus) = config.consensus_config.as_mut() { + consensus.server_addr.set_port(ports_config.consensus_port); + update_port_in_host(&mut consensus.public_addr, ports_config.consensus_port)?; + } + api.web3_json_rpc.http_port = ports_config.web3_json_rpc_http_port; update_port_in_url( &mut api.web3_json_rpc.http_url, @@ -63,9 +160,17 @@ pub fn update_ports(config: &mut GeneralConfig, ports_config: &PortsConfig) -> a &mut api.web3_json_rpc.ws_url, ports_config.web3_json_rpc_ws_port, )?; + contract_verifier.port = ports_config.contract_verifier_port; + update_port_in_url( + &mut contract_verifier.url, + ports_config.contract_verifier_port, + )?; api.healthcheck.port = ports_config.healthcheck_port; api.merkle_tree.port = ports_config.merkle_tree_port; api.prometheus.listener_port = ports_config.prometheus_listener_port; + + prometheus.listener_port = ports_config.prometheus_listener_port; + Ok(()) } @@ -74,7 +179,14 @@ fn update_port_in_url(http_url: &mut String, port: u16) -> anyhow::Result<()> { if let Err(()) = http_url_url.set_port(Some(port)) { anyhow::bail!("Wrong url, setting port is impossible"); } - *http_url = http_url_url.as_str().to_string(); + *http_url = http_url_url.to_string(); + Ok(()) +} + +fn update_port_in_host(host: &mut Host, port: u16) -> anyhow::Result<()> { + let url = Url::parse(&format!("http://{}", host.0))?; + let host_str = url.host_str().context("Failed to get host")?; + host.0 = format!("{host_str}:{port}"); Ok(()) } @@ -88,9 +200,21 @@ pub struct PortsConfig { pub healthcheck_port: u16, pub merkle_tree_port: u16, pub 
prometheus_listener_port: u16, + pub contract_verifier_port: u16, + pub consensus_port: u16, } impl PortsConfig { + pub fn apply_offset(&mut self, offset: u16) { + self.web3_json_rpc_http_port += offset; + self.web3_json_rpc_ws_port += offset; + self.healthcheck_port += offset; + self.merkle_tree_port += offset; + self.prometheus_listener_port += offset; + self.contract_verifier_port += offset; + self.consensus_port += offset; + } + pub fn next_empty_ports_config(&self) -> PortsConfig { Self { web3_json_rpc_http_port: self.web3_json_rpc_http_port + 100, @@ -98,6 +222,8 @@ impl PortsConfig { healthcheck_port: self.healthcheck_port + 100, merkle_tree_port: self.merkle_tree_port + 100, prometheus_listener_port: self.prometheus_listener_port + 100, + contract_verifier_port: self.contract_verifier_port + 100, + consensus_port: self.consensus_port + 100, } } } @@ -116,3 +242,14 @@ impl ReadConfig for GeneralConfig { decode_yaml_repr::(&path, false) } } + +impl ConfigWithL2RpcUrl for GeneralConfig { + fn get_l2_rpc_url(&self) -> anyhow::Result { + self.api_config + .as_ref() + .map(|api_config| &api_config.web3_json_rpc.http_url) + .context("API config is missing")? + .parse() + .context("Failed to parse L2 RPC URL") + } +} diff --git a/zk_toolbox/crates/config/src/lib.rs b/zk_toolbox/crates/config/src/lib.rs index 4e00962229bc..1a7c5bf1d7e2 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zk_toolbox/crates/config/src/lib.rs @@ -1,3 +1,4 @@ +pub use apps::*; pub use chain::*; pub use consts::*; pub use contracts::*; @@ -11,6 +12,7 @@ pub use wallet_creation::*; pub use wallets::*; pub use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +mod apps; mod chain; mod consts; mod contracts; @@ -23,6 +25,11 @@ mod secrets; mod wallet_creation; mod wallets; +pub mod consensus_config; +pub mod consensus_secrets; +pub mod docker_compose; +pub mod explorer; +pub mod explorer_compose; pub mod external_node; pub mod forge_interface; pub mod portal; diff --git a/zk_toolbox/crates/config/src/portal.rs b/zk_toolbox/crates/config/src/portal.rs index 4b68d5744cd9..c787c6cc7026 100644 --- a/zk_toolbox/crates/config/src/portal.rs +++ b/zk_toolbox/crates/config/src/portal.rs @@ -5,28 +5,25 @@ use types::TokenInfo; use xshell::Shell; use crate::{ - consts::{LOCAL_CONFIGS_PATH, PORTAL_CONFIG_FILE}, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig}, + consts::{ + LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, PORTAL_CONFIG_FILE, + PORTAL_JS_CONFIG_FILE, + }, + traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, }; +/// Portal JSON configuration file. This file contains configuration for the portal app. 
#[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] -pub struct PortalRuntimeConfig { +pub struct PortalConfig { pub node_type: String, - pub hyperchains_config: HyperchainsConfig, + pub hyperchains_config: Vec<PortalChainConfig>, + #[serde(flatten)] + pub other: serde_json::Value, } #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct HyperchainsConfig(pub Vec<HyperchainConfig>); - -impl HyperchainsConfig { - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct HyperchainConfig { +pub struct PortalChainConfig { pub network: NetworkConfig, pub tokens: Vec<TokenConfig>, } @@ -35,10 +32,12 @@ pub struct HyperchainConfig { #[serde(rename_all = "camelCase")] pub struct NetworkConfig { pub id: u64, // L2 Network ID - pub key: String, // L2 Network key - pub name: String, // L2 Network name + pub key: String, // L2 Network key (chain name used during the initialization) + pub name: String, // L2 Network name (displayed in the app dropdown) pub rpc_url: String, // L2 RPC URL #[serde(skip_serializing_if = "Option::is_none")] + pub hidden: Option<bool>, // If true, the chain will not be shown in the app dropdown + #[serde(skip_serializing_if = "Option::is_none")] pub block_explorer_url: Option<String>, // L2 Block Explorer URL #[serde(skip_serializing_if = "Option::is_none")] pub block_explorer_api: Option<String>, // L2 Block Explorer API @@ -46,6 +45,8 @@ pub struct NetworkConfig { pub public_l1_network_id: Option<u64>, // Ethereum Mainnet or Ethereum Sepolia Testnet ID #[serde(skip_serializing_if = "Option::is_none")] pub l1_network: Option, + #[serde(flatten)] + pub other: serde_json::Value, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -81,44 +82,94 @@ pub struct TokenConfig { pub name: Option<String>, } -impl PortalRuntimeConfig { +impl PortalConfig { + /// Returns the path to the portal configuration file. pub fn get_config_path(ecosystem_base_path: &Path) -> PathBuf { ecosystem_base_path .join(LOCAL_CONFIGS_PATH) + .join(LOCAL_APPS_PATH) .join(PORTAL_CONFIG_FILE) } -} -impl FileConfigWithDefaultName for PortalRuntimeConfig { - const FILE_NAME: &'static str = PORTAL_CONFIG_FILE; -} + /// Reads the existing config or creates a default one if it doesn't exist. + pub fn read_or_create_default(shell: &Shell) -> anyhow::Result<Self> { + let config_path = Self::get_config_path(&shell.current_dir()); + match Self::read(shell, &config_path) { + Ok(config) => Ok(config), + Err(_) => { + let config = Self::default(); + config.save(shell, &config_path)?; + Ok(config) + } + } + } + + /// Adds or updates a given chain configuration. + pub fn add_chain_config(&mut self, config: &PortalChainConfig) { + // Replace if config with the same network key already exists + if let Some(index) = self + .hyperchains_config + .iter() + .position(|c| c.network.key == config.network.key) + { + self.hyperchains_config[index] = config.clone(); + return; + } + self.hyperchains_config.push(config.clone()); + } + + /// Retains only the chains whose names are present in the given vector. + pub fn filter(&mut self, chain_names: &[String]) { + self.hyperchains_config + .retain(|config| chain_names.contains(&config.network.key)); + } + + /// Hides all chains except those specified in the given vector.
+ pub fn hide_except(&mut self, chain_names: &[String]) { + for config in &mut self.hyperchains_config { + config.network.hidden = Some(!chain_names.contains(&config.network.key)); + } + } -impl SaveConfig for PortalRuntimeConfig { - fn save(&self, shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<()> { + /// Checks if a chain with the given name exists in the configuration. + pub fn contains(&self, chain_name: &String) -> bool { + self.hyperchains_config + .iter() + .any(|config| &config.network.key == chain_name) + } + + pub fn is_empty(&self) -> bool { + self.hyperchains_config.is_empty() + } + + pub fn save_as_js(&self, shell: &Shell) -> anyhow::Result<PathBuf> { // The dapp-portal is served as a pre-built static app in a Docker image. // It uses a JavaScript file (config.js) that injects the configuration at runtime // by overwriting the '##runtimeConfig' property of the window object. - // Therefore, we generate a JavaScript file instead of a JSON file. // This file will be mounted to the Docker image when it runs. + let path = Self::get_generated_js_config_path(&shell.current_dir()); let json = serde_json::to_string_pretty(&self)?; let config_js_content = format!("window['##runtimeConfig'] = {};", json); - Ok(shell.write_file(path, config_js_content.as_bytes())?) + shell.write_file(path.clone(), config_js_content.as_bytes())?; + Ok(path) + } + + fn get_generated_js_config_path(ecosystem_base_path: &Path) -> PathBuf { + ecosystem_base_path + .join(LOCAL_CONFIGS_PATH) + .join(LOCAL_GENERATED_PATH) + .join(PORTAL_JS_CONFIG_FILE) + } } -impl ReadConfig for PortalRuntimeConfig { - fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { - let config_js_content = shell.read_file(path)?; - // Extract the JSON part from the JavaScript file - let json_start = config_js_content - .find('{') - .ok_or_else(|| anyhow::anyhow!("Invalid config file format"))?; - let json_end = config_js_content - .rfind('}') - .ok_or_else(|| anyhow::anyhow!("Invalid config file format"))?; - let json_str = &config_js_content[json_start..=json_end]; - // Parse the JSON into PortalRuntimeConfig - let config: PortalRuntimeConfig = serde_json::from_str(json_str)?; - Ok(config) +impl Default for PortalConfig { + fn default() -> Self { + PortalConfig { + node_type: "hyperchain".to_string(), + hyperchains_config: Vec::new(), + other: serde_json::Value::Null, + } } } + +impl ZkToolboxConfig for PortalConfig {} diff --git a/zk_toolbox/crates/config/src/traits.rs b/zk_toolbox/crates/config/src/traits.rs index 1f00b39b040a..bb0722762e31 100644 --- a/zk_toolbox/crates/config/src/traits.rs +++ b/zk_toolbox/crates/config/src/traits.rs @@ -5,6 +5,7 @@ use common::files::{ read_json_file, read_toml_file, read_yaml_file, save_json_file, save_toml_file, save_yaml_file, }; use serde::{de::DeserializeOwned, Serialize}; +use url::Url; use xshell::Shell; // Configs that we use only inside zk toolbox, we don't have protobuf implementation for them.
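Taken together, the `PortalConfig` methods above form a small lifecycle: load (or lazily create) the JSON config, reconcile it with the chains that actually exist, and emit the runtime `config.js` that the dapp-portal image mounts. A minimal sketch of that flow, using only the APIs shown in this diff (the helper name is illustrative, not part of this PR, and it assumes `SaveConfig` is available for `ZkToolboxConfig` types via the crate's blanket impl):

```rust
use config::{portal::PortalConfig, traits::SaveConfig};
use xshell::Shell;

/// Hypothetical helper: reconcile the portal config with the ecosystem's
/// chains and regenerate the config.js consumed by the dapp-portal container.
fn refresh_portal_config(shell: &Shell, chains: &[String]) -> anyhow::Result<std::path::PathBuf> {
    // Load the JSON config, creating a default one on first run.
    let mut portal = PortalConfig::read_or_create_default(shell)?;
    // Drop chains that no longer exist, hide everything not currently served.
    portal.filter(chains);
    portal.hide_except(chains);
    // Persist the JSON source of truth, then emit the generated JS file.
    let config_path = PortalConfig::get_config_path(&shell.current_dir());
    portal.save(shell, &config_path)?;
    portal.save_as_js(shell)
}
```

This mirrors how `update_portal_config` and the explorer commands later in this diff consume the same API.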
@@ -156,3 +157,7 @@ fn save_with_comment( } Ok(()) } + +pub trait ConfigWithL2RpcUrl { + fn get_l2_rpc_url(&self) -> anyhow::Result<Url>; +} diff --git a/zk_toolbox/crates/config/src/wallet_creation.rs b/zk_toolbox/crates/config/src/wallet_creation.rs index 5b44da5a13f3..5dfb0cec501a 100644 --- a/zk_toolbox/crates/config/src/wallet_creation.rs +++ b/zk_toolbox/crates/config/src/wallet_creation.rs @@ -64,6 +64,10 @@ pub fn create_localhost_wallets( blob_operator: Wallet::from_mnemonic(&eth_mnemonic.test_mnemonic, &base_path, 2)?, fee_account: Wallet::from_mnemonic(&eth_mnemonic.test_mnemonic, &base_path, 3)?, governor: Wallet::from_mnemonic(&eth_mnemonic.test_mnemonic, &base_path, 4)?, - token_multiplier_setter: Wallet::from_mnemonic(&eth_mnemonic.test_mnemonic, &base_path, 5)?, + token_multiplier_setter: Some(Wallet::from_mnemonic( + &eth_mnemonic.test_mnemonic, + &base_path, + 5, + )?), }) } diff --git a/zk_toolbox/crates/config/src/wallets.rs b/zk_toolbox/crates/config/src/wallets.rs index a2e5be87440a..9c87453954ec 100644 --- a/zk_toolbox/crates/config/src/wallets.rs +++ b/zk_toolbox/crates/config/src/wallets.rs @@ -15,7 +15,7 @@ pub struct WalletsConfig { pub blob_operator: Wallet, pub fee_account: Wallet, pub governor: Wallet, - pub token_multiplier_setter: Wallet, + pub token_multiplier_setter: Option<Wallet>, } impl WalletsConfig { @@ -27,7 +27,7 @@ impl WalletsConfig { blob_operator: Wallet::random(rng), fee_account: Wallet::random(rng), governor: Wallet::random(rng), - token_multiplier_setter: Wallet::random(rng), + token_multiplier_setter: Some(Wallet::random(rng)), } } @@ -39,7 +39,7 @@ impl WalletsConfig { blob_operator: Wallet::empty(), fee_account: Wallet::empty(), governor: Wallet::empty(), - token_multiplier_setter: Wallet::empty(), + token_multiplier_setter: Some(Wallet::empty()), } } pub fn deployer_private_key(&self) -> Option<H256> { diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zk_toolbox/crates/zk_inception/Cargo.toml index 40cc1970ba67..5e3d391a04c2 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zk_toolbox/crates/zk_inception/Cargo.toml @@ -35,3 +35,6 @@ zksync_config.workspace = true slugify-rs.workspace = true zksync_basic_types.workspace = true clap-markdown.workspace = true +zksync_consensus_roles.workspace = true +zksync_consensus_crypto.workspace = true +secrecy.workspace = true diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 73bfb56cfd39..904b1421e3a0 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -21,7 +21,7 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception chain update-token-multiplier-setter`↴](#zk_inception-chain-update-token-multiplier-setter) - [`zk_inception prover`↴](#zk_inception-prover) - [`zk_inception prover init`↴](#zk_inception-prover-init) -- [`zk_inception prover generate-sk`↴](#zk_inception-prover-generate-sk) +- [`zk_inception prover setup-keys`↴](#zk_inception-prover-setup-keys) - [`zk_inception prover run`↴](#zk_inception-prover-run) - [`zk_inception prover init-bellman-cuda`↴](#zk_inception-prover-init-bellman-cuda) - [`zk_inception server`↴](#zk_inception-server) @@ -33,6 +33,7 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception contract-verifier`↴](#zk_inception-contract-verifier) - [`zk_inception contract-verifier run`↴](#zk_inception-contract-verifier-run) - [`zk_inception contract-verifier init`↴](#zk_inception-contract-verifier-init)
+- [`zk_inception portal`↴](#zk_inception-portal) - [`zk_inception update`↴](#zk_inception-update) ## `zk_inception` @@ -50,7 +51,8 @@ ZK Toolbox is a set of tools for working with zk stack. - `external-node` — External Node related commands - `containers` — Run containers for local development - `contract-verifier` — Run contract verifier -- `update` — Update zkSync +- `portal` — Run dapp-portal +- `update` — Update ZKsync ###### **Options:** @@ -76,11 +78,7 @@ Ecosystem related commands Create a new ecosystem and chain, setting necessary configurations for later initialization -**Usage:** `zk_inception ecosystem create [OPTIONS] [CHAIN_ID]` - -###### **Arguments:** - -- `` +**Usage:** `zk_inception ecosystem create [OPTIONS]` ###### **Options:** @@ -91,6 +89,7 @@ Create a new ecosystem and chain, setting necessary configurations for later ini - `--link-to-code ` — Code link - `--chain-name ` +- `--chain-id ` — Chain ID - `--prover-mode ` — Prover options Possible values: `no-proofs`, `gpu` @@ -166,7 +165,9 @@ Initialize ecosystem and chain, deploying necessary contracts and performing on- - `-u`, `--use-default` — Use default database urls and names - `-d`, `--dont-drop` - `--dev` — Deploy ecosystem using all defaults. Suitable for local development -- `-o`, `--observability` — Enable Grafana +- `-o`, `--observability ` — Enable Grafana + + Possible values: `true`, `false` ## `zk_inception ecosystem change-default-chain` @@ -199,21 +200,18 @@ Chain related commands - `deploy-l2-contracts` — Deploy all l2 contracts - `upgrader` — Deploy Default Upgrader - `deploy-paymaster` — Deploy paymaster smart contract -- `update-token-multiplier-setter` — Update Token Multiplier Setter address on l1 +- `update-token-multiplier-setter` — Update Token Multiplier Setter address on L1 ## `zk_inception chain create` Create a new chain, setting the necessary configurations for later initialization -**Usage:** `zk_inception chain create [OPTIONS] [CHAIN_ID]` - -###### **Arguments:** - -- `` +**Usage:** `zk_inception chain create [OPTIONS]` ###### **Options:** - `--chain-name ` +- `--chain-id ` — Chain ID - `--prover-mode ` — Prover options Possible values: `no-proofs`, `gpu` @@ -393,11 +391,28 @@ Deploy paymaster smart contract ## `zk_inception chain update-token-multiplier-setter` -Update Token Multiplier Setter address on l1. Token Multiplier Setter is used by chains with custom base token to -propagate the changes to numerator / denominator to the l1. Address of the Token Multiplier Setter is taken from the -wallets configuration. +Update Token Multiplier Setter address on L1 + +**Usage:** `zk_inception chain update-token-multiplier-setter [OPTIONS]` + +###### **Options:** + +- `--verify ` — Verify deployed contracts + + Possible values: `true`, `false` + +- `--verifier ` — Verifier to use + + Default value: `etherscan` -**Usage:** `zk_inception chain update-token-multiplier-setter` + Possible values: `etherscan`, `sourcify`, `blockscout`, `oklink` + +- `--verifier-url ` — Verifier URL, if using a custom provider +- `--verifier-api-key ` — Verifier API key +- `--resume` +- `-a`, `--additional-args ` — List of additional arguments that can be passed through the CLI. 
+ + e.g.: `zk_inception init -a --private-key=<PRIVATE_KEY>` ## `zk_inception prover` @@ -428,7 +443,7 @@ Initialize prover - `--project-id <PROJECT_ID>` - `--shall-save-to-public-bucket <SHALL_SAVE_TO_PUBLIC_BUCKET>` -Possible values: `true`, `false` + Possible values: `true`, `false` - `--public-store-dir <PUBLIC_STORE_DIR>` - `--public-bucket-base-url <PUBLIC_BUCKET_BASE_URL>` @@ -438,24 +453,43 @@ Possible values: `true`, `false` - `--public-project-id <PUBLIC_PROJECT_ID>` - `--bellman-cuda-dir <BELLMAN_CUDA_DIR>` - `--download-key <DOWNLOAD_KEY>` -- `--setup-database` -- `--use-default` - use default database -- `--dont-drop` - don't drop database -- `--prover-db-url` - URL of database to use -- `--prover-db-name` - Name of database to use -Possible values: `true`, `false` + Possible values: `true`, `false` - `--setup-key-path <SETUP_KEY_PATH>` +- `--setup-database <SETUP_DATABASE>` + + Possible values: `true`, `false` + +- `--prover-db-url <PROVER_DB_URL>` — Prover database URL, without the database name +- `--prover-db-name <PROVER_DB_NAME>` — Prover database name +- `-u`, `--use-default <USE_DEFAULT>` — Use default database URLs and names + + Possible values: `true`, `false` + +- `-d`, `--dont-drop <DONT_DROP>` + + Possible values: `true`, `false` + - `--cloud-type <CLOUD_TYPE>` Possible values: `gcp`, `local` -## `zk_inception prover generate-sk` +## `zk_inception prover setup-keys` + +Setup keys + +**Usage:** `zk_inception prover setup-keys` + +###### **Options:** + +- `--mode <MODE>` -Generate setup keys + Possible values: `download`, `generate` -**Usage:** `zk_inception prover generate-sk` + +- `--region <REGION>` + + Possible values: `asia`, `europe`, `us` ## `zk_inception prover run` @@ -467,7 +501,12 @@ Run prover - `--component <COMPONENT>` - Possible values: `gateway`, `witness-generator`, `witness-vector-generator`, `prover`, `compressor` + Possible values: `gateway`, `witness-generator`, `witness-vector-generator`, `prover`, `compressor`, + `prover-job-monitor` + +- `--docker` - Whether to run the Docker image of the component instead of the binary. + + Possible values: `true`, `false` - `--round <ROUND>` @@ -475,6 +514,9 @@ Run prover - `--threads <THREADS>` +- `--max-allocation <MAX_ALLOCATION>` - When running the prover component, limits its maximum + memory allocation, in bytes.
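The README options above map one-to-one onto clap derive structs in the sources. For orientation, a sketch of how the new `prover run` flags could be declared, mirroring the `RunServerArgs` pattern used elsewhere in this PR (struct and field names here are illustrative assumptions, not the actual zk_inception definitions):

```rust
use clap::Parser;

/// Illustrative mirror of the documented `prover run` options.
#[derive(Debug, Parser)]
pub struct ProverRunSketchArgs {
    /// Component to run (gateway, witness-generator, ...).
    #[clap(long)]
    pub component: Option<String>,
    /// Run the Docker image of the component instead of the local binary;
    /// a bare `--docker` is treated as `--docker true`.
    #[clap(long, default_missing_value = "true", num_args = 0..=1)]
    pub docker: Option<bool>,
    /// Upper bound on the prover component's memory allocation, in bytes.
    #[clap(long)]
    pub max_allocation: Option<u64>,
}
```

The `default_missing_value`/`num_args = 0..=1` pair is what turns boolean flags into the documented `Possible values: true, false` form.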
+ ## `zk_inception prover init-bellman-cuda` Initialize bellman-cuda @@ -549,7 +591,9 @@ Run containers for local development ###### **Options:** -- `-o`, `--observability` — Enable Grafana +- `-o`, `--observability ` — Enable Grafana + + Possible values: `true`, `false` ## `zk_inception contract-verifier` @@ -581,9 +625,21 @@ Download required binaries for contract verifier - `--solc-version ` — Version of solc to install - `--vyper-version ` — Version of vyper to install +## `zk_inception portal` + +Run dapp-portal + +**Usage:** `zk_inception portal [OPTIONS]` + +###### **Options:** + +- `--port ` — The port number for the portal app + + Default value: `3030` + ## `zk_inception update` -Update zkSync +Update ZKsync **Usage:** `zk_inception update [OPTIONS]` diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs index a27b653edf52..d18b05c910e5 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs @@ -1,9 +1,7 @@ pub use containers::*; -pub use portal::*; pub use run_server::*; pub use update::*; mod containers; -mod portal; mod run_server; mod update; diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/portal.rs b/zk_toolbox/crates/zk_inception/src/commands/args/portal.rs deleted file mode 100644 index e31058aad5d0..000000000000 --- a/zk_toolbox/crates/zk_inception/src/commands/args/portal.rs +++ /dev/null @@ -1,12 +0,0 @@ -use clap::Parser; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize, Parser)] -pub struct PortalArgs { - #[clap( - long, - default_value = "3030", - help = "The port number for the portal app" - )] - pub port: u16, -} diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs b/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs index 1e373319ec73..ebe407d4822d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use crate::messages::{ MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_BUILD_HELP, MSG_SERVER_COMPONENTS_HELP, - MSG_SERVER_GENESIS_HELP, + MSG_SERVER_GENESIS_HELP, MSG_SERVER_URING_HELP, }; #[derive(Debug, Serialize, Deserialize, Parser)] @@ -17,4 +17,6 @@ pub struct RunServerArgs { additional_args: Vec, #[clap(long, help = MSG_SERVER_BUILD_HELP)] pub build: bool, + #[clap(help=MSG_SERVER_URING_HELP, long, default_missing_value = "true", num_args = 0..=1)] + pub uring: bool, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs index 65f809287890..3ea15d10f8be 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs @@ -65,6 +65,8 @@ pub struct ChainCreateArgs { base_token_price_denominator: Option, #[clap(long, help = MSG_SET_AS_DEFAULT_HELP, default_missing_value = "true", num_args = 0..=1)] pub(crate) set_as_default: Option, + #[clap(long, default_value = "false")] + pub(crate) legacy_bridge: bool, } impl ChainCreateArgs { @@ -224,6 +226,7 @@ impl ChainCreateArgs { wallet_path, base_token, set_as_default, + legacy_bridge: self.legacy_bridge, }) } } @@ -238,6 +241,7 @@ pub struct ChainCreateArgsFinal { pub wallet_path: Option, pub base_token: BaseToken, pub set_as_default: bool, + pub legacy_bridge: bool, } #[derive(Debug, 
Clone, EnumIter, Display, PartialEq, Eq)] diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs index 0700c96c76ec..9dd6c490bd78 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs @@ -1,3 +1,5 @@ +use std::str::FromStr; + use clap::Parser; use common::{forge::ForgeScriptArgs, Prompt}; use config::ChainConfig; @@ -11,10 +13,35 @@ use crate::{ defaults::LOCAL_RPC_URL, messages::{ MSG_DEPLOY_PAYMASTER_PROMPT, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, - MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, + MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, MSG_PORT_OFFSET_HELP, }, }; +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct PortOffset(u16); + +impl PortOffset { + pub fn from_chain_id(chain_id: u16) -> Self { + Self((chain_id - 1) * 100) + } +} + +impl FromStr for PortOffset { + type Err = String; + + fn from_str(s: &str) -> Result { + s.parse::() + .map(PortOffset) + .map_err(|_| "Invalid port offset".to_string()) + } +} + +impl From for u16 { + fn from(port_offset: PortOffset) -> Self { + port_offset.0 + } +} + #[derive(Debug, Clone, Serialize, Deserialize, Parser)] pub struct InitArgs { /// All ethereum environment related arguments @@ -28,6 +55,8 @@ pub struct InitArgs { pub deploy_paymaster: Option, #[clap(long, help = MSG_L1_RPC_URL_HELP)] pub l1_rpc_url: Option, + #[clap(long, help = MSG_PORT_OFFSET_HELP)] + pub port_offset: Option, } impl InitArgs { @@ -57,6 +86,10 @@ impl InitArgs { genesis_args: self.genesis_args.fill_values_with_prompt(config), deploy_paymaster, l1_rpc_url, + port_offset: self + .port_offset + .unwrap_or(PortOffset::from_chain_id(config.id as u16)) + .into(), } } } @@ -67,4 +100,5 @@ pub struct InitArgsFinal { pub genesis_args: GenesisArgsFinal, pub deploy_paymaster: bool, pub l1_rpc_url: String, + pub port_offset: u16, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs index 04aac872d57e..ebce67cb4ded 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs @@ -60,32 +60,43 @@ pub(crate) fn create_chain_inner( shell: &Shell, wallets: Option, ) -> anyhow::Result<()> { + if args.legacy_bridge { + logger::warn("WARNING!!! 
You are creating a chain with a legacy bridge; use it only for compatibility testing") } let default_chain_name = args.chain_name.clone(); let chain_path = ecosystem_config.chains.join(&default_chain_name); let chain_configs_path = create_local_configs_dir(shell, &chain_path)?; - let chain_id = ecosystem_config.list_of_chains().len() as u32; + let (chain_id, legacy_bridge) = if args.legacy_bridge { + // A legacy-bridge chain is distinguished by using the same chain id as the ecosystem + (ecosystem_config.era_chain_id, Some(true)) + } else { + (L2ChainId::from(args.chain_id), None) + }; + let internal_id = ecosystem_config.list_of_chains().len() as u32; let chain_config = ChainConfig { - id: chain_id, + id: internal_id, name: default_chain_name.clone(), - chain_id: L2ChainId::from(args.chain_id), + chain_id, prover_version: args.prover_version, l1_network: ecosystem_config.l1_network, link_to_code: ecosystem_config.link_to_code.clone(), rocks_db_path: ecosystem_config.get_chain_rocks_db_path(&default_chain_name), + artifacts: ecosystem_config.get_chain_artifacts_path(&default_chain_name), configs: chain_configs_path.clone(), external_node_config_path: None, l1_batch_commit_data_generator_mode: args.l1_batch_commit_data_generator_mode, base_token: args.base_token, wallet_creation: args.wallet_creation, shell: OnceCell::from(shell.clone()), + legacy_bridge, }; create_wallets( shell, &chain_config.configs, &ecosystem_config.link_to_code, - chain_id, + internal_id, args.wallet_creation, args.wallet_path, wallets, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs index 30f361e44af2..7545ec2ec26f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs @@ -11,7 +11,7 @@ use config::{ forge_interface::{ deploy_l2_contracts::{ input::DeployL2ContractsInput, - output::{DefaultL2UpgradeOutput, InitializeBridgeOutput}, + output::{ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput}, }, script_params::DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS, }, @@ -31,7 +31,8 @@ use crate::{ pub enum Deploy2ContractsOption { All, Upgrader, - IntiailizeBridges, + InitializeBridges, + ConsensusRegistry, } pub async fn run( @@ -70,7 +71,17 @@ pub async fn run( ) .await?; } - Deploy2ContractsOption::IntiailizeBridges => { + Deploy2ContractsOption::ConsensusRegistry => { + deploy_consensus_registry( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + } + Deploy2ContractsOption::InitializeBridges => { initialize_bridges( shell, &chain_config, @@ -88,6 +99,25 @@ pub async fn run( Ok(()) } +/// Build the L2 contracts, deploy one or all of them with `forge`, then update the config +/// by reading one or all outputs written by the deploy scripts.
+async fn build_and_deploy( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + forge_args: ForgeScriptArgs, + signature: Option<&str>, + mut update_config: impl FnMut(&Shell, &Path) -> anyhow::Result<()>, +) -> anyhow::Result<()> { + build_l2_contracts(shell, &ecosystem_config.link_to_code)?; + call_forge(shell, chain_config, ecosystem_config, forge_args, signature).await?; + update_config( + shell, + &DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), + )?; + Ok(()) +} + pub async fn initialize_bridges( shell: &Shell, chain_config: &ChainConfig, @@ -95,22 +125,22 @@ pub async fn initialize_bridges( contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { - build_l2_contracts(shell, &ecosystem_config.link_to_code)?; - call_forge( + let signature = if let Some(true) = chain_config.legacy_bridge { + Some("runDeployLegacySharedBridge") + } else { + Some("runDeploySharedBridge") + }; + build_and_deploy( shell, chain_config, ecosystem_config, forge_args, - Some("runDeploySharedBridge"), + signature, + |shell, out| { + contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?) + }, ) - .await?; - let output = InitializeBridgeOutput::read( - shell, - DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - - contracts_config.set_l2_shared_bridge(&output)?; - Ok(()) + .await } pub async fn deploy_upgrader( @@ -120,48 +150,65 @@ pub async fn deploy_upgrader( contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { - build_l2_contracts(shell, &ecosystem_config.link_to_code)?; - call_forge( + build_and_deploy( shell, chain_config, ecosystem_config, forge_args, Some("runDefaultUpgrader"), + |shell, out| { + contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?) + }, ) - .await?; - let output = DefaultL2UpgradeOutput::read( - shell, - DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - - contracts_config.set_default_l2_upgrade(&output)?; - Ok(()) + .await } -pub async fn deploy_l2_contracts( +pub async fn deploy_consensus_registry( shell: &Shell, chain_config: &ChainConfig, ecosystem_config: &EcosystemConfig, contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { - build_l2_contracts(shell, &ecosystem_config.link_to_code)?; - call_forge(shell, chain_config, ecosystem_config, forge_args, None).await?; - let output = InitializeBridgeOutput::read( + build_and_deploy( shell, - DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - - contracts_config.set_l2_shared_bridge(&output)?; + chain_config, + ecosystem_config, + forge_args, + Some("runDeployConsensusRegistry"), + |shell, out| { + contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?) 
+ }, + ) + .await +} - let output = DefaultL2UpgradeOutput::read( +pub async fn deploy_l2_contracts( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &mut ContractsConfig, + forge_args: ForgeScriptArgs, +) -> anyhow::Result<()> { + let signature = if let Some(true) = chain_config.legacy_bridge { + Some("runWithLegacyBridge") + } else { + None + }; + build_and_deploy( shell, - DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - - contracts_config.set_default_l2_upgrade(&output)?; - - Ok(()) + chain_config, + ecosystem_config, + forge_args, + signature, + |shell, out| { + contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?)?; + contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?)?; + contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?)?; + Ok(()) + }, + ) + .await } async fn call_forge( diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs index 2436322a5291..2f027f676a93 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs @@ -9,10 +9,10 @@ use common::{ spinner::Spinner, }; use config::{ - set_databases, set_rocks_db_config, + set_databases, set_file_artifacts, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, ContractsConfig, EcosystemConfig, GeneralConfig, GenesisConfig, SecretsConfig, - WalletsConfig, + ChainConfig, ContractsConfig, EcosystemConfig, FileArtifacts, GeneralConfig, GenesisConfig, + SecretsConfig, WalletsConfig, }; use types::ProverMode; use xshell::Shell; @@ -58,7 +58,9 @@ pub async fn genesis( let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; let mut general = config.get_general_config()?; + let file_artifacts = FileArtifacts::new(config.artifacts.clone()); set_rocks_db_config(&mut general, rocks_db)?; + set_file_artifacts(&mut general, file_artifacts); if config.prover_version != ProverMode::NoProofs { general .eth @@ -78,7 +80,12 @@ pub async fn genesis( .sender .as_mut() .context("sender")? - .pubdata_sending_mode = PubdataSendingMode::Custom + .pubdata_sending_mode = PubdataSendingMode::Custom; + general + .state_keeper_config + .as_mut() + .context("state_keeper_config")? 
+ .pubdata_overhead_part = 0.0; } general.save_with_base_path(shell, &config.configs)?; @@ -100,7 +107,7 @@ pub async fn genesis( } fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { - let server = Server::new(None, chain_config.link_to_code.clone()); + let server = Server::new(None, chain_config.link_to_code.clone(), false); server .run( shell, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs index 9d1c0d543ee0..734e5e54863b 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs @@ -1,4 +1,4 @@ -use anyhow::Context; +use anyhow::{bail, Context}; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, @@ -11,9 +11,10 @@ use config::{ register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, script_params::REGISTER_CHAIN_SCRIPT_PARAMS, }, - set_l1_rpc_url, + ports_config, set_l1_rpc_url, traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, - update_from_chain_config, ChainConfig, ContractsConfig, EcosystemConfig, + update_from_chain_config, update_ports, ChainConfig, ContractsConfig, EcosystemConfig, + GeneralConfig, }; use types::{BaseToken, L1Network, WalletCreation}; use xshell::Shell; @@ -26,18 +27,22 @@ use crate::{ deploy_l2_contracts, deploy_paymaster, genesis::genesis, set_token_multiplier_setter::set_token_multiplier_setter, + setup_legacy_bridge::setup_legacy_bridge, }, - portal::create_and_save_portal_config, + portal::update_portal_config, }, consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, messages::{ msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_DISTRIBUTING_ETH_SPINNER, MSG_GENESIS_DATABASE_ERR, - MSG_MINT_BASE_TOKEN_SPINNER, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, + MSG_MINT_BASE_TOKEN_SPINNER, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTS_CONFIG_ERR, MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, - MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, + MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, + }, + utils::{ + consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, + forge::{check_the_balance, fill_forge_private_key}, }, - utils::forge::{check_the_balance, fill_forge_private_key}, }; pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { @@ -66,6 +71,16 @@ pub async fn init( ) -> anyhow::Result<()> { copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; + let mut general_config = chain_config.get_general_config()?; + apply_port_offset(init_args.port_offset, &mut general_config)?; + let ports = ports_config(&general_config).context(MSG_PORTS_CONFIG_ERR)?; + + let consensus_keys = generate_consensus_keys(); + let consensus_config = + get_consensus_config(chain_config, ports, Some(consensus_keys.clone()), None)?; + general_config.consensus_config = Some(consensus_config); + general_config.save_with_base_path(shell, &chain_config.configs)?; + let mut genesis_config = chain_config.get_genesis_config()?; update_from_chain_config(&mut genesis_config, chain_config); genesis_config.save_with_base_path(shell, &chain_config.configs)?; @@ -80,6 +95,7 @@ pub async fn init( let mut secrets = chain_config.get_secrets_config()?; set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; + secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); 
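As an aside, the port arithmetic used during chain init is easy to state concretely: chain N gets every default port shifted by (N - 1) * 100, so chain 1 keeps the defaults and chain 2 runs 100 ports higher. A sketch using the `ports_config`/`update_ports` helpers shown earlier in this diff (the inline offset computation stands in for `PortOffset::from_chain_id`, which lives in the zk_inception binary crate; the 3050 default in the comment is an assumption for illustration):

```rust
use anyhow::Context;
use config::{ports_config, update_ports, GeneralConfig};

/// Shift every service port for the N-th chain by (N - 1) * 100,
/// mirroring `apply_port_offset` in this diff.
fn shift_ports_for_chain(general: &mut GeneralConfig, chain_index: u16) -> anyhow::Result<()> {
    let offset = (chain_index - 1) * 100; // what PortOffset::from_chain_id computes
    let mut ports = ports_config(general).context("Missing ports config")?;
    ports.apply_offset(offset); // e.g. chain 2: web3 HTTP 3050 -> 3150
    update_ports(general, &ports)
}
```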
secrets.save_with_base_path(shell, &chain_config.configs)?; let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); @@ -107,22 +123,25 @@ pub async fn init( .await?; spinner.finish(); - let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); - set_token_multiplier_setter( - shell, - ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), - contracts_config.l1.chain_admin_addr, - ecosystem_config - .get_wallets() - .unwrap() - .token_multiplier_setter - .address, - &init_args.forge_args.clone(), - init_args.l1_rpc_url.clone(), - ) - .await?; - spinner.finish(); + if chain_config.base_token != BaseToken::eth() { + let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); + set_token_multiplier_setter( + shell, + ecosystem_config, + chain_config.get_wallets_config()?.governor_private_key(), + contracts_config.l1.chain_admin_addr, + chain_config + .get_wallets_config() + .unwrap() + .token_multiplier_setter + .context(MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND)? + .address, + &init_args.forge_args.clone(), + init_args.l1_rpc_url.clone(), + ) + .await?; + spinner.finish(); + } deploy_l2_contracts::deploy_l2_contracts( shell, @@ -134,6 +153,17 @@ pub async fn init( .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; + if let Some(true) = chain_config.legacy_bridge { + setup_legacy_bridge( + shell, + chain_config, + ecosystem_config, + &contracts_config, + init_args.forge_args.clone(), + ) + .await?; + } + if init_args.deploy_paymaster { deploy_paymaster::deploy_paymaster( shell, @@ -149,7 +179,7 @@ pub async fn init( .await .context(MSG_GENESIS_DATABASE_ERR)?; - create_and_save_portal_config(ecosystem_config, shell) + update_portal_config(shell, chain_config) .await .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?; @@ -249,3 +279,15 @@ pub async fn mint_base_token( } Ok(()) } + +fn apply_port_offset(port_offset: u16, general_config: &mut GeneralConfig) -> anyhow::Result<()> { + let Some(mut ports_config) = ports_config(general_config) else { + bail!("Missing ports config"); + }; + + ports_config.apply_offset(port_offset); + + update_ports(general_config, &ports_config)?; + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs index dbddc923336a..61a164c16553 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs @@ -16,6 +16,7 @@ pub mod deploy_paymaster; pub mod genesis; pub(crate) mod init; mod set_token_multiplier_setter; +mod setup_legacy_bridge; #[derive(Subcommand, Debug)] pub enum ChainCommands { @@ -31,6 +32,9 @@ pub enum ChainCommands { /// Deploy all l2 contracts #[command(alias = "l2")] DeployL2Contracts(ForgeScriptArgs), + /// Deploy L2 consensus registry + #[command(alias = "consensus")] + DeployConsensusRegistry(ForgeScriptArgs), /// Deploy Default Upgrader Upgrader(ForgeScriptArgs), /// Deploy paymaster smart contract @@ -48,11 +52,14 @@ pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<() ChainCommands::DeployL2Contracts(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::All).await } + ChainCommands::DeployConsensusRegistry(args) => { + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await + } ChainCommands::Upgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } ChainCommands::InitializeBridges(args) => { 
- deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::IntiailizeBridges).await + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::InitializeBridges).await } ChainCommands::DeployPaymaster(args) => deploy_paymaster::run(args, shell).await, ChainCommands::UpdateTokenMultiplierSetter(args) => { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs index 0ab0d451f1f7..f92391c22f47 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs @@ -15,7 +15,7 @@ use crate::{ messages::{ MSG_CHAIN_NOT_INITIALIZED, MSG_L1_SECRETS_MUST_BE_PRESENTED, MSG_TOKEN_MULTIPLIER_SETTER_UPDATED_TO, MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, - MSG_WALLETS_CONFIG_MUST_BE_PRESENT, + MSG_WALLETS_CONFIG_MUST_BE_PRESENT, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, }, utils::forge::{check_the_balance, fill_forge_private_key}, }; @@ -47,6 +47,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { .get_wallets() .context(MSG_WALLETS_CONFIG_MUST_BE_PRESENT)? .token_multiplier_setter + .context(MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND)? .address; let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs new file mode 100644 index 000000000000..925014fe4e61 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs @@ -0,0 +1,73 @@ +use anyhow::Context; +use common::{ + forge::{Forge, ForgeScriptArgs}, + spinner::Spinner, +}; +use config::{ + forge_interface::{ + script_params::SETUP_LEGACY_BRIDGE, setup_legacy_bridge::SetupLegacyBridgeInput, + }, + traits::SaveConfig, + ChainConfig, ContractsConfig, EcosystemConfig, +}; +use xshell::Shell; + +use crate::{ + messages::{MSG_DEPLOYING_PAYMASTER, MSG_L1_SECRETS_MUST_BE_PRESENTED}, + utils::forge::{check_the_balance, fill_forge_private_key}, +}; + +pub async fn setup_legacy_bridge( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &ContractsConfig, + forge_args: ForgeScriptArgs, +) -> anyhow::Result<()> { + let input = SetupLegacyBridgeInput { + bridgehub: contracts_config.ecosystem_contracts.bridgehub_proxy_addr, + diamond_proxy: contracts_config.l1.diamond_proxy_addr, + shared_bridge_proxy: contracts_config.bridges.shared.l1_address, + transparent_proxy_admin: contracts_config + .ecosystem_contracts + .transparent_proxy_admin_addr, + erc20bridge_proxy: contracts_config.bridges.erc20.l1_address, + token_weth_address: Default::default(), + chain_id: chain_config.chain_id, + l2shared_bridge_address: contracts_config + .bridges + .shared + .l2_address + .expect("Not fully initialized"), + create2factory_salt: contracts_config.create2_factory_salt, + create2factory_addr: contracts_config.create2_factory_addr, + }; + let foundry_contracts_path = chain_config.path_to_foundry(); + input.save(shell, SETUP_LEGACY_BRIDGE.input(&chain_config.link_to_code))?; + let secrets = chain_config.get_secrets_config()?; + + let mut forge = Forge::new(&foundry_contracts_path) + .script(&SETUP_LEGACY_BRIDGE.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url( + secrets + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)?
+ .l1_rpc_url + .expose_str() + .to_string(), + ) + .with_broadcast(); + + forge = fill_forge_private_key( + forge, + ecosystem_config.get_wallets()?.governor_private_key(), + )?; + + let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); + check_the_balance(&forge).await?; + forge.run(shell)?; + spinner.finish(); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zk_toolbox/crates/zk_inception/src/commands/containers.rs index 17c32c04bc2f..81d7970df839 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/containers.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/containers.rs @@ -40,7 +40,7 @@ pub fn initialize_docker(shell: &Shell, ecosystem: &EcosystemConfig) -> anyhow:: } fn start_container(shell: &Shell, compose_file: &str, retry_msg: &str) -> anyhow::Result<()> { - while let Err(err) = docker::up(shell, compose_file) { + while let Err(err) = docker::up(shell, compose_file, true) { logger::error(err.to_string()); if !common::PromptConfirm::new(retry_msg).default(true).ask() { return Err(err); diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs index 1ae06c810ba1..32049aa0a902 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use common::{cmd::Cmd, logger}; +use common::{cmd::Cmd, config::global_config, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; @@ -10,7 +10,7 @@ use crate::messages::{ pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; let chain = ecosystem - .load_chain(Some(ecosystem.default_chain.clone())) + .load_chain(global_config().chain_name.clone()) .context(MSG_CHAIN_NOT_FOUND_ERR)?; let config_path = chain.path_to_general_config(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs index 4f1d5b827432..57a63e1f1940 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs @@ -16,7 +16,10 @@ use crate::{ containers::{initialize_docker, start_containers}, ecosystem::{ args::create::EcosystemCreateArgs, - create_configs::{create_erc20_deployment_config, create_initial_deployments_config}, + create_configs::{ + create_apps_config, create_erc20_deployment_config, + create_initial_deployments_config, + }, }, }, messages::{ @@ -76,6 +79,7 @@ fn create(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> { create_initial_deployments_config(shell, &configs_path)?; create_erc20_deployment_config(shell, &configs_path)?; + create_apps_config(shell, &configs_path)?; let ecosystem_config = EcosystemConfig { name: ecosystem_name.clone(), diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs index b4f42313e3d0..38358355ff97 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs @@ -2,7 +2,8 @@ use std::path::Path; use config::{ forge_interface::deploy_ecosystem::input::{Erc20DeploymentConfig, InitialDeploymentConfig}, - traits::SaveConfigWithCommentAndBasePath, + traits::{SaveConfigWithBasePath, 
SaveConfigWithCommentAndBasePath}, + AppsEcosystemConfig, }; use xshell::Shell; @@ -33,3 +34,12 @@ pub fn create_erc20_deployment_config( )?; Ok(config) } + +pub fn create_apps_config( + shell: &Shell, + ecosystem_configs_path: &Path, +) -> anyhow::Result { + let config = AppsEcosystemConfig::default(); + config.save_with_base_path(shell, ecosystem_configs_path)?; + Ok(config) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 85ae1c5aa0fe..360734c0ab8a 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -36,9 +36,9 @@ use super::{ setup_observability, }; use crate::{ - accept_ownership::accept_owner, + accept_ownership::{accept_admin, accept_owner}, commands::{ - chain, + chain::{self, args::init::PortOffset}, ecosystem::create_configs::{ create_erc20_deployment_config, create_initial_deployments_config, }, @@ -120,6 +120,7 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { genesis_args: genesis_args.clone().fill_values_with_prompt(&chain_config), deploy_paymaster: final_ecosystem_args.deploy_paymaster, l1_rpc_url: final_ecosystem_args.ecosystem.l1_rpc_url.clone(), + port_offset: PortOffset::from_chain_id(chain_config.id as u16).into(), }; chain::init::init( @@ -338,6 +339,17 @@ async fn deploy_ecosystem_inner( ) .await?; + accept_admin( + shell, + config, + contracts_config.l1.chain_admin_addr, + config.get_wallets()?.governor_private_key(), + contracts_config.ecosystem_contracts.bridgehub_proxy_addr, + &forge_args, + l1_rpc_url.clone(), + ) + .await?; + accept_owner( shell, config, @@ -349,6 +361,17 @@ async fn deploy_ecosystem_inner( ) .await?; + accept_admin( + shell, + config, + contracts_config.l1.chain_admin_addr, + config.get_wallets()?.governor_private_key(), + contracts_config.bridges.shared.l1_address, + &forge_args, + l1_rpc_url.clone(), + ) + .await?; + accept_owner( shell, config, @@ -362,6 +385,19 @@ async fn deploy_ecosystem_inner( ) .await?; + accept_admin( + shell, + config, + contracts_config.l1.chain_admin_addr, + config.get_wallets()?.governor_private_key(), + contracts_config + .ecosystem_contracts + .state_transition_proxy_addr, + &forge_args, + l1_rpc_url.clone(), + ) + .await?; + Ok(contracts_config) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs new file mode 100644 index 000000000000..6fdd3faa9807 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs @@ -0,0 +1,39 @@ +use std::path::Path; + +use anyhow::Context; +use common::{config::global_config, docker}; +use config::{explorer_compose::ExplorerBackendComposeConfig, EcosystemConfig}; +use xshell::Shell; + +use crate::messages::{ + msg_explorer_chain_not_initialized, MSG_CHAIN_NOT_FOUND_ERR, + MSG_EXPLORER_FAILED_TO_RUN_DOCKER_SERVICES_ERR, +}; + +pub(crate) fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_chain(global_config().chain_name.clone()) + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let chain_name = chain_config.name.clone(); + // Read chain-level explorer backend docker compose file + let ecosystem_path = shell.current_dir(); + let backend_config_path = + ExplorerBackendComposeConfig::get_config_path(&ecosystem_path, &chain_config.name); + if 
!backend_config_path.exists() { + anyhow::bail!(msg_explorer_chain_not_initialized(&chain_name)); + } + // Run docker compose + run_backend(shell, &backend_config_path)?; + Ok(()) +} + +fn run_backend(shell: &Shell, explorer_compose_config_path: &Path) -> anyhow::Result<()> { + if let Some(docker_compose_file) = explorer_compose_config_path.to_str() { + docker::up(shell, docker_compose_file, false) + .context(MSG_EXPLORER_FAILED_TO_RUN_DOCKER_SERVICES_ERR)?; + } else { + anyhow::bail!("Invalid docker compose file"); + } + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs new file mode 100644 index 000000000000..43700d91a0df --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs @@ -0,0 +1,135 @@ +use anyhow::Context; +use common::{config::global_config, db, logger, Prompt}; +use config::{ + explorer::{ExplorerChainConfig, ExplorerConfig}, + explorer_compose::{ExplorerBackendComposeConfig, ExplorerBackendConfig, ExplorerBackendPorts}, + traits::{ConfigWithL2RpcUrl, SaveConfig}, + ChainConfig, EcosystemConfig, +}; +use slugify_rs::slugify; +use url::Url; +use xshell::Shell; + +use crate::{ + commands::chain::args::init::PortOffset, + consts::L2_BASE_TOKEN_ADDRESS, + defaults::{generate_explorer_db_name, DATABASE_EXPLORER_URL}, + messages::{ + msg_chain_load_err, msg_explorer_db_name_prompt, msg_explorer_db_url_prompt, + msg_explorer_initializing_database_for, MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR, + MSG_EXPLORER_INITIALIZED, + }, +}; + +pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + // If specific chain is provided, initialize only that chain; otherwise, initialize all chains + let chains_enabled = match global_config().chain_name { + Some(ref chain_name) => vec![chain_name.clone()], + None => ecosystem_config.list_of_chains(), + }; + // Initialize chains one by one + let mut explorer_config = ExplorerConfig::read_or_create_default(shell)?; + for chain_name in chains_enabled.iter() { + // Load chain config + let chain_config = ecosystem_config + .load_chain(Some(chain_name.clone())) + .context(msg_chain_load_err(chain_name))?; + // Build backend config - parameters required to create explorer backend services + let backend_config = build_backend_config(&chain_config); + // Initialize explorer database + initialize_explorer_database(&backend_config.database_url).await?; + // Create explorer backend docker compose file + let l2_rpc_url = chain_config.get_general_config()?.get_l2_rpc_url()?; + let backend_compose_config = + ExplorerBackendComposeConfig::new(chain_name, l2_rpc_url, &backend_config)?; + let backend_compose_config_path = + ExplorerBackendComposeConfig::get_config_path(&shell.current_dir(), chain_name); + backend_compose_config.save(shell, &backend_compose_config_path)?; + // Add chain to explorer.json + let explorer_chain_config = build_explorer_chain_config(&chain_config, &backend_config)?; + explorer_config.add_chain_config(&explorer_chain_config); + } + // Save explorer config + let config_path = ExplorerConfig::get_config_path(&shell.current_dir()); + explorer_config.save(shell, config_path)?; + + logger::outro(MSG_EXPLORER_INITIALIZED); + Ok(()) +} + +fn build_backend_config(chain_config: &ChainConfig) -> ExplorerBackendConfig { + // Prompt explorer database name + logger::info(msg_explorer_initializing_database_for(&chain_config.name)); + let db_config = 
fill_database_values_with_prompt(chain_config); + + // Allocate ports for backend services + let backend_ports = allocate_explorer_services_ports(chain_config); + + // Build explorer backend config + ExplorerBackendConfig::new(db_config.full_url(), &backend_ports) +} + +async fn initialize_explorer_database(db_url: &Url) -> anyhow::Result<()> { + let db_config = db::DatabaseConfig::from_url(db_url)?; + db::drop_db_if_exists(&db_config) + .await + .context(MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR)?; + db::init_db(&db_config).await?; + Ok(()) +} + +fn fill_database_values_with_prompt(config: &ChainConfig) -> db::DatabaseConfig { + let default_db_name: String = generate_explorer_db_name(config); + let chain_name = config.name.clone(); + let explorer_db_url = Prompt::new(&msg_explorer_db_url_prompt(&chain_name)) + .default(DATABASE_EXPLORER_URL.as_str()) + .ask(); + let explorer_db_name: String = Prompt::new(&msg_explorer_db_name_prompt(&chain_name)) + .default(&default_db_name) + .ask(); + let explorer_db_name = slugify!(&explorer_db_name, separator = "_"); + db::DatabaseConfig::new(explorer_db_url, explorer_db_name) +} + +fn allocate_explorer_services_ports(chain_config: &ChainConfig) -> ExplorerBackendPorts { + // Try to allocate intuitive ports with an offset from the defaults + let offset: u16 = PortOffset::from_chain_id(chain_config.id as u16).into(); + ExplorerBackendPorts::default().with_offset(offset) +} + +fn build_explorer_chain_config( + chain_config: &ChainConfig, + backend_config: &ExplorerBackendConfig, +) -> anyhow::Result<ExplorerChainConfig> { + let general_config = chain_config.get_general_config()?; + // Get L2 RPC URL from general config + let l2_rpc_url = general_config.get_l2_rpc_url()?; + // Get Verification API URL from general config + let verification_api_url = general_config + .contract_verifier + .as_ref() + .map(|verifier| &verifier.url) + .context("verification_url")?; + // Build API URL + let api_port = backend_config.ports.api_http_port; + let api_url = format!("http://127.0.0.1:{}", api_port); + + // Build explorer chain config + Ok(ExplorerChainConfig { + name: chain_config.name.clone(), + l2_network_name: chain_config.name.clone(), + l2_chain_id: chain_config.chain_id.as_u64(), + rpc_url: l2_rpc_url.to_string(), + api_url: api_url.to_string(), + base_token_address: L2_BASE_TOKEN_ADDRESS.to_string(), + hostnames: Vec::new(), + icon: "/images/icons/zksync-arrows.svg".to_string(), + maintenance: false, + published: true, + bridge_url: None, + l1_explorer_url: None, + verification_api_url: Some(verification_api_url.to_string()), + other: serde_json::Value::Null, + }) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs new file mode 100644 index 000000000000..4b66d49598c4 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs @@ -0,0 +1,27 @@ +use clap::Subcommand; +use xshell::Shell; + +mod backend; +mod init; +mod run; + +#[derive(Subcommand, Debug)] +pub enum ExplorerCommands { + /// Initialize explorer (create database to store explorer data and generate docker + /// compose file with explorer services). Runs for all chains, unless --chain is passed + Init, + /// Start explorer backend services (api, data_fetcher, worker) for a given chain.
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs
new file mode 100644
index 000000000000..4b66d49598c4
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs
@@ -0,0 +1,27 @@
+use clap::Subcommand;
+use xshell::Shell;
+
+mod backend;
+mod init;
+mod run;
+
+#[derive(Subcommand, Debug)]
+pub enum ExplorerCommands {
+    /// Initialize explorer (create database to store explorer data and generate docker
+    /// compose file with explorer services). Runs for all chains, unless --chain is passed
+    Init,
+    /// Start explorer backend services (api, data_fetcher, worker) for a given chain.
+    /// Uses default chain, unless --chain is passed
+    #[command(alias = "backend")]
+    RunBackend,
+    /// Run explorer app
+    Run,
+}
+
+pub(crate) async fn run(shell: &Shell, args: ExplorerCommands) -> anyhow::Result<()> {
+    match args {
+        ExplorerCommands::Init => init::run(shell).await,
+        ExplorerCommands::Run => run::run(shell),
+        ExplorerCommands::RunBackend => backend::run(shell),
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs
new file mode 100644
index 000000000000..a6519f62edba
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs
@@ -0,0 +1,98 @@
+use std::path::Path;
+
+use anyhow::Context;
+use common::{config::global_config, docker, logger};
+use config::{explorer::*, traits::SaveConfig, AppsEcosystemConfig, EcosystemConfig};
+use xshell::Shell;
+
+use crate::{
+    consts::{EXPLORER_APP_DOCKER_CONFIG_PATH, EXPLORER_APP_DOCKER_IMAGE},
+    messages::{
+        msg_explorer_running_with_config, msg_explorer_starting_on,
+        MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR, MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR,
+        MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR,
+    },
+};
+
+pub(crate) fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let ecosystem_path = shell.current_dir();
+    // Get ecosystem level apps.yaml config
+    let apps_config = AppsEcosystemConfig::read_or_create_default(shell)?;
+    // If specific_chain is provided, run only with that chain; otherwise, run with all chains
+    let chains_enabled = match global_config().chain_name {
+        Some(ref chain_name) => vec![chain_name.clone()],
+        None => ecosystem_config.list_of_chains(),
+    };
+
+    // Read explorer config
+    let config_path = ExplorerConfig::get_config_path(&ecosystem_path);
+    let mut explorer_config = ExplorerConfig::read_or_create_default(shell)
+        .context(MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    // Validate and update explorer config
+    explorer_config.filter(&ecosystem_config.list_of_chains());
+    explorer_config.hide_except(&chains_enabled);
+    if explorer_config.is_empty() {
+        anyhow::bail!(MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR);
+    }
+
+    // Save explorer config
+    explorer_config.save(shell, &config_path)?;
+
+    let config_js_path = explorer_config
+        .save_as_js(shell)
+        .context(MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    logger::info(msg_explorer_running_with_config(&config_path));
+    logger::info(msg_explorer_starting_on(
+        "127.0.0.1",
+        apps_config.explorer.http_port,
+    ));
+    let name = explorer_app_name(&ecosystem_config.name);
+    run_explorer(
+        shell,
+        &config_js_path,
+        &name,
+        apps_config.explorer.http_port,
+    )?;
+    Ok(())
+}
+
+fn run_explorer(
+    shell: &Shell,
+    config_file_path: &Path,
+    name: &str,
+    port: u16,
+) -> anyhow::Result<()> {
+    let port_mapping = format!("{}:{}", port, port);
+    let volume_mapping = format!(
+        "{}:{}",
+        config_file_path.display(),
+        EXPLORER_APP_DOCKER_CONFIG_PATH
+    );
+
+    let docker_args: Vec<String> = vec![
+        "--platform".to_string(),
+        "linux/amd64".to_string(),
+        "--name".to_string(),
+        name.to_string(),
+        "-p".to_string(),
+        port_mapping,
+        "-v".to_string(),
+        volume_mapping,
+        "-e".to_string(),
+        format!("PORT={}", port),
+        "--rm".to_string(),
+    ];
+
+    docker::run(shell, EXPLORER_APP_DOCKER_IMAGE, docker_args)
+        .with_context(|| MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR)?;
+    Ok(())
+}
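
For reference, the argument vector above should expand to roughly the following invocation, assuming `docker::run` prepends `docker run` and appends the image name (the container name, port and config path are placeholders; the mount target and image come from the consts added later in this diff):

    docker run --platform linux/amd64 --name <ecosystem>-explorer-app \
        -p <port>:<port> \
        -v <config_js_path>:/usr/src/app/packages/app/dist/config.js \
        -e PORT=<port> --rm matterlabs/block-explorer-app
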
+
+/// Generates a name for the explorer app Docker container.
+/// Will be passed as `--name` argument to `docker run`.
+fn explorer_app_name(ecosystem_name: &str) -> String {
+    format!("{}-explorer-app", ecosystem_name)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs
index 1bc0c06728d7..cd6ff4c71534 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs
@@ -1,7 +1,9 @@
 use clap::Parser;
 use serde::{Deserialize, Serialize};
 
-use crate::messages::{MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_COMPONENTS_HELP};
+use crate::messages::{
+    MSG_ENABLE_CONSENSUS_HELP, MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_COMPONENTS_HELP,
+};
 
 #[derive(Debug, Serialize, Deserialize, Parser)]
 pub struct RunExternalNodeArgs {
@@ -9,6 +11,8 @@
     pub reinit: bool,
     #[clap(long, help = MSG_SERVER_COMPONENTS_HELP)]
     pub components: Option<Vec<String>>,
+    #[clap(long, help = MSG_ENABLE_CONSENSUS_HELP, default_missing_value = "true", num_args = 0..=1)]
+    pub enable_consensus: Option<bool>,
     #[clap(long, short)]
     #[arg(trailing_var_arg = true, allow_hyphen_values = true, hide = false, help = MSG_SERVER_ADDITIONAL_ARGS_HELP)]
     pub additional_args: Vec<String>,
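
The `default_missing_value = "true"` / `num_args = 0..=1` combination makes the consensus switch tri-state: the bare flag yields `Some(true)`, an explicit value is parsed, and omission leaves `None`, which the external-node `run.rs` further down collapses to `false` via `unwrap_or(false)`. For illustration (the binary name is an assumption):

    zk_inception external-node run --enable-consensus          # Some(true)
    zk_inception external-node run --enable-consensus=false    # Some(false)
    zk_inception external-node run                             # None -> false
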
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs
index 51101c228878..89e08418c6e5 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs
@@ -1,4 +1,4 @@
-use std::{path::Path, str::FromStr};
+use std::{collections::BTreeMap, path::Path, str::FromStr};
 
 use anyhow::Context;
 use common::{config::global_config, logger};
@@ -8,14 +8,24 @@ use config::{
 };
 use xshell::Shell;
 use zksync_basic_types::url::SensitiveUrl;
-use zksync_config::configs::{DatabaseSecrets, L1Secrets};
+use zksync_config::configs::{
+    consensus::{ConsensusSecrets, NodeSecretKey, Secret},
+    DatabaseSecrets, L1Secrets,
+};
+use zksync_consensus_crypto::TextFmt;
+use zksync_consensus_roles as roles;
 
 use crate::{
     commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal},
     messages::{
-        msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_PREPARING_EN_CONFIGS,
+        msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED,
+        MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR,
+        MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PORTS_CONFIG_ERR, MSG_PREPARING_EN_CONFIGS,
+    },
+    utils::{
+        consensus::{get_consensus_config, node_public_key},
+        rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption},
     },
-    utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption},
 };
 
 pub fn run(shell: &Shell, args: PrepareConfigArgs) -> anyhow::Result<()> {
@@ -64,15 +74,45 @@ fn prepare_configs(
         gateway_url: None,
     };
     let mut general_en = general.clone();
+    let next_empty_ports_config = ports_config(&general)
+        .context(MSG_PORTS_CONFIG_ERR)?
+        .next_empty_ports_config();
+    update_ports(&mut general_en, &next_empty_ports_config)?;
+
+    // Set consensus config
+    let main_node_consensus_config = general
+        .consensus_config
+        .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?;
+
+    let mut gossip_static_outbound = BTreeMap::new();
+    let main_node_public_key = node_public_key(
+        &config
+            .get_secrets_config()?
+            .consensus
+            .context(MSG_CONSENSUS_SECRETS_MISSING_ERR)?,
+    )?
+    .context(MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR)?;
-    update_ports(
-        &mut general_en,
-        &ports_config(&general)
-            .context("da")?
-            .next_empty_ports_config(),
+    gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr);
+
+    let en_consensus_config = get_consensus_config(
+        config,
+        next_empty_ports_config,
+        None,
+        Some(gossip_static_outbound),
     )?;
+    general_en.consensus_config = Some(en_consensus_config.clone());
+    en_consensus_config.save_with_base_path(shell, en_configs_path)?;
+
+    // Set secrets config
+    let node_key = roles::node::SecretKey::generate().encode();
+    let consensus_secrets = ConsensusSecrets {
+        validator_key: None,
+        attester_key: None,
+        node_key: Some(NodeSecretKey(Secret::new(node_key))),
+    };
     let secrets = SecretsConfig {
-        consensus: None,
+        consensus: Some(consensus_secrets),
        database: Some(DatabaseSecrets {
            server_url: Some(args.db.full_url().into()),
            prover_url: None,
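
Two kinds of key material are involved above: the main node's public key is recovered from existing secrets and pinned as a static gossip peer, while each external node gets a freshly generated node key of its own. A minimal sketch of the generation half, assuming the same zksync_consensus_roles / TextFmt APIs used in the diff:

    use zksync_consensus_crypto::TextFmt;
    use zksync_consensus_roles as roles;

    fn sketch() {
        // New node identity; encode() renders the textual form stored in secrets.yaml.
        let secret = roles::node::SecretKey::generate();
        let node_key_text = secret.encode();
        // The matching public key is what peers would pin in gossip_static_outbound.
        let public_key_text = secret.public().encode();
        let _ = (node_key_text, public_key_text);
    }
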
diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs
index 9d3da4663859..46328699e6d4 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs
@@ -32,6 +32,7 @@ async fn run_external_node(
     if args.reinit {
         init::init(shell, chain_config).await?
     }
+    let enable_consensus = args.enable_consensus.unwrap_or(false);
     let server = RunExternalNode::new(args.components.clone(), chain_config)?;
-    server.run(shell, args.additional_args.clone())
+    server.run(shell, enable_consensus, args.additional_args.clone())
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/mod.rs
index 6276601860a4..12e5d0931321 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/mod.rs
@@ -3,6 +3,7 @@ pub mod chain;
 pub mod containers;
 pub mod contract_verifier;
 pub mod ecosystem;
+pub mod explorer;
 pub mod external_node;
 pub mod portal;
 pub mod prover;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/portal.rs b/zk_toolbox/crates/zk_inception/src/commands/portal.rs
index cc939f3fb3ea..5bf211211779 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/portal.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/portal.rs
@@ -1,33 +1,30 @@
-use std::{collections::HashMap, path::Path};
+use std::path::Path;
 
-use anyhow::{anyhow, Context};
-use common::{docker, ethereum, logger};
+use anyhow::Context;
+use common::{config::global_config, docker, ethereum, logger};
 use config::{
     portal::*,
-    traits::{ReadConfig, SaveConfig},
-    ChainConfig, EcosystemConfig,
+    traits::{ConfigWithL2RpcUrl, SaveConfig},
+    AppsEcosystemConfig, ChainConfig, EcosystemConfig,
 };
 use ethers::types::Address;
 use types::{BaseToken, TokenInfo};
 use xshell::Shell;
 
 use crate::{
-    commands::args::PortalArgs,
-    consts::{L2_BASE_TOKEN_ADDRESS, PORTAL_DOCKER_CONTAINER_PORT, PORTAL_DOCKER_IMAGE},
+    consts::{L2_BASE_TOKEN_ADDRESS, PORTAL_DOCKER_CONFIG_PATH, PORTAL_DOCKER_IMAGE},
     messages::{
-        msg_portal_starting_on, MSG_PORTAL_CONFIG_IS_EMPTY_ERR,
-        MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR,
+        msg_portal_running_with_config, msg_portal_starting_on,
+        MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR,
+        MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR,
     },
 };
 
-async fn create_hyperchain_config(chain_config: &ChainConfig) -> anyhow::Result<HyperchainConfig> {
+async fn build_portal_chain_config(
+    chain_config: &ChainConfig,
+) -> anyhow::Result<PortalChainConfig> {
     // Get L2 RPC URL from general config
-    let general_config = chain_config.get_general_config()?;
-    let rpc_url = general_config
-        .api_config
-        .as_ref()
-        .map(|api_config| &api_config.web3_json_rpc.http_url)
-        .context("api_config")?;
+    let l2_rpc_url = chain_config.get_general_config()?.get_l2_rpc_url()?;
     // Get L1 RPC URL from secrets config
     let secrets_config = chain_config.get_secrets_config()?;
     let l1_rpc_url = secrets_config
@@ -68,97 +65,126 @@ async fn create_hyperchain_config(chain_config: &ChainConfig) -> anyhow::Result<
         name: Some(base_token_info.name.to_string()),
     }];
     // Build hyperchain config
-    Ok(HyperchainConfig {
+    Ok(PortalChainConfig {
         network: NetworkConfig {
             id: chain_config.chain_id.as_u64(),
             key: chain_config.name.clone(),
             name: chain_config.name.clone(),
-            rpc_url: rpc_url.to_string(),
+            rpc_url: l2_rpc_url.to_string(),
             l1_network,
             public_l1_network_id: None,
             block_explorer_url: None,
             block_explorer_api: None,
+            hidden: None,
+            other: serde_json::Value::Null,
         },
         tokens,
     })
 }
 
-async fn create_hyperchains_config(
-    chain_configs: &[ChainConfig],
-) -> anyhow::Result<HyperchainsConfig> {
-    let mut hyperchain_configs = Vec::new();
-    for chain_config in chain_configs {
-        if let Ok(config) = create_hyperchain_config(chain_config).await {
-            hyperchain_configs.push(config)
-        }
-    }
-    Ok(HyperchainsConfig(hyperchain_configs))
+pub async fn update_portal_config(
+    shell: &Shell,
+    chain_config: &ChainConfig,
+) -> anyhow::Result<PortalConfig> {
+    // Build and append portal chain config to the portal config
+    let portal_chain_config = build_portal_chain_config(chain_config).await?;
+    let mut portal_config = PortalConfig::read_or_create_default(shell)?;
+    portal_config.add_chain_config(&portal_chain_config);
+    // Save portal config
+    let config_path = PortalConfig::get_config_path(&shell.current_dir());
+    portal_config.save(shell, config_path)?;
+    Ok(portal_config)
 }
 
-pub async fn create_portal_config(
+/// Validates portal config - appends missing chains and removes unknown chains
+async fn validate_portal_config(
+    portal_config: &mut PortalConfig,
     ecosystem_config: &EcosystemConfig,
-) -> anyhow::Result<PortalRuntimeConfig> {
-    let chains: Vec<String> = ecosystem_config.list_of_chains();
-    let mut chain_configs = Vec::new();
-    for chain in chains {
-        if let Some(chain_config) = ecosystem_config.load_chain(Some(chain.clone())) {
-            chain_configs.push(chain_config)
+) -> anyhow::Result<()> {
+    let chain_names = ecosystem_config.list_of_chains();
+    for chain_name in &chain_names {
+        if portal_config.contains(chain_name) {
+            continue;
+        }
+        // Append missing chain, chain might not be initialized, so ignoring errors
+        if let Some(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) {
+            if let Ok(portal_chain_config) = build_portal_chain_config(&chain_config).await {
+                portal_config.add_chain_config(&portal_chain_config);
+            }
         }
     }
-    let hyperchains_config = create_hyperchains_config(&chain_configs).await?;
-    if hyperchains_config.is_empty() {
-        anyhow::bail!("Failed to create any valid hyperchain config")
-    }
-    let runtime_config = PortalRuntimeConfig {
-        node_type: "hyperchain".to_string(),
-        hyperchains_config,
-    };
-    Ok(runtime_config)
-}
-
-pub async fn create_and_save_portal_config(
-    ecosystem_config: &EcosystemConfig,
-    shell: &Shell,
-) -> anyhow::Result<PortalRuntimeConfig> {
-    let portal_config = create_portal_config(ecosystem_config).await?;
-    let config_path = PortalRuntimeConfig::get_config_path(&shell.current_dir());
-    portal_config.save(shell, config_path)?;
-    Ok(portal_config)
+    portal_config.filter(&chain_names);
+    Ok(())
 }
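
Taken together, validate_portal_config reconciles the on-disk config with the ecosystem: chains the ecosystem knows but the config lacks are appended (build failures for uninitialized chains are deliberately ignored), and `filter` then drops entries the ecosystem no longer knows. A compact sketch of the assumed semantics of `filter`/`hide_except` over the config's chain list (field names are illustrative, not from this diff):

    impl PortalConfig {
        /// Keep only chains whose name appears in `names`.
        pub fn filter(&mut self, names: &[String]) {
            self.chains.retain(|c| names.contains(&c.name));
        }
        /// Hide every chain except those in `names`.
        pub fn hide_except(&mut self, names: &[String]) {
            for c in &mut self.chains {
                c.hidden = Some(!names.contains(&c.name));
            }
        }
    }
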
 
-pub async fn run(shell: &Shell, args: PortalArgs) -> anyhow::Result<()> {
+pub async fn run(shell: &Shell) -> anyhow::Result<()> {
     let ecosystem_config: EcosystemConfig = EcosystemConfig::from_file(shell)?;
-    let config_path = PortalRuntimeConfig::get_config_path(&shell.current_dir());
-    logger::info(format!(
-        "Using portal config file at {}",
-        config_path.display()
-    ));
-
-    let portal_config = match PortalRuntimeConfig::read(shell, &config_path) {
-        Ok(config) => config,
-        Err(_) => create_and_save_portal_config(&ecosystem_config, shell)
-            .await
-            .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?,
+    // Get ecosystem level apps.yaml config
+    let apps_config = AppsEcosystemConfig::read_or_create_default(shell)?;
+    // Display all chains, unless --chain is passed
+    let chains_enabled = match global_config().chain_name {
+        Some(ref chain_name) => vec![chain_name.clone()],
+        None => ecosystem_config.list_of_chains(),
     };
-    if portal_config.hyperchains_config.is_empty() {
-        return Err(anyhow!(MSG_PORTAL_CONFIG_IS_EMPTY_ERR));
+
+    // Read portal config
+    let config_path = PortalConfig::get_config_path(&shell.current_dir());
+    let mut portal_config = PortalConfig::read_or_create_default(shell)
+        .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    // Validate and update portal config
+    validate_portal_config(&mut portal_config, &ecosystem_config).await?;
+    portal_config.hide_except(&chains_enabled);
+    if portal_config.is_empty() {
+        anyhow::bail!(MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR);
     }
-    logger::info(msg_portal_starting_on("127.0.0.1", args.port));
-    run_portal(shell, &config_path, args.port)?;
+
+    // Save portal config
+    portal_config.save(shell, &config_path)?;
+
+    let config_js_path = portal_config
+        .save_as_js(shell)
+        .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    logger::info(msg_portal_running_with_config(&config_path));
+    logger::info(msg_portal_starting_on(
+        "127.0.0.1",
+        apps_config.portal.http_port,
+    ));
+    let name = portal_app_name(&ecosystem_config.name);
+    run_portal(shell, &config_js_path, &name, apps_config.portal.http_port)?;
     Ok(())
 }
 
-fn run_portal(shell: &Shell, config_file_path: &Path, port: u16) -> anyhow::Result<()> {
-    let port_mapping = format!("{}:{}", port, PORTAL_DOCKER_CONTAINER_PORT);
-    let volume_mapping = format!("{}:/usr/src/app/dist/config.js", config_file_path.display());
+fn run_portal(shell: &Shell, config_file_path: &Path, name: &str, port: u16) -> anyhow::Result<()> {
+    let port_mapping = format!("{}:{}", port, port);
+    let volume_mapping = format!(
+        "{}:{}",
+        config_file_path.display(),
+        PORTAL_DOCKER_CONFIG_PATH
+    );
 
-    let mut docker_args: HashMap<String, String> = HashMap::new();
-    docker_args.insert("--platform".to_string(), "linux/amd64".to_string());
-    docker_args.insert("-p".to_string(), port_mapping);
-    docker_args.insert("-v".to_string(), volume_mapping);
+    let docker_args: Vec<String> = vec![
+        "--platform".to_string(),
+        "linux/amd64".to_string(),
+        "--name".to_string(),
+        name.to_string(),
+        "-p".to_string(),
+        port_mapping,
+        "-v".to_string(),
+        volume_mapping,
+        "-e".to_string(),
+        format!("PORT={}", port),
+        "--rm".to_string(),
+    ];
 
     docker::run(shell, PORTAL_DOCKER_IMAGE, docker_args)
         .with_context(|| MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR)?;
     Ok(())
 }
+
+/// Generates a name for the portal app Docker container.
+/// Will be passed as `--name` argument to `docker run`.
+fn portal_app_name(ecosystem_name: &str) -> String {
+    format!("{}-portal-app", ecosystem_name)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs
new file mode 100644
index 000000000000..095dccf00b38
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs
@@ -0,0 +1,22 @@
+use clap::Parser;
+use common::Prompt;
+
+use crate::messages::MSG_SETUP_COMPRESSOR_KEY_PATH_PROMPT;
+
+#[derive(Debug, Clone, Parser, Default)]
+pub struct CompressorKeysArgs {
+    #[clap(long)]
+    pub path: Option<String>,
+}
+
+impl CompressorKeysArgs {
+    pub fn fill_values_with_prompt(self, default: &str) -> CompressorKeysArgs {
+        let path = self.path.unwrap_or_else(|| {
+            Prompt::new(MSG_SETUP_COMPRESSOR_KEY_PATH_PROMPT)
+                .default(default)
+                .ask()
+        });
+
+        CompressorKeysArgs { path: Some(path) }
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs
index e8c9cf1888d5..94fea1389d28 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs
@@ -8,7 +8,10 @@ use url::Url;
 use xshell::Shell;
 use zksync_config::configs::fri_prover::CloudConnectionMode;
 
-use super::init_bellman_cuda::InitBellmanCudaArgs;
+use super::{
+    compressor_keys::CompressorKeysArgs, init_bellman_cuda::InitBellmanCudaArgs,
+    setup_keys::SetupKeysArgs,
+};
 use crate::{
     commands::prover::gcs::get_project_ids,
     consts::{DEFAULT_CREDENTIALS_FILE, DEFAULT_PROOF_STORE_DIR},
@@ -18,25 +21,24 @@ use crate::{
         MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT, MSG_CREATE_GCS_BUCKET_NAME_PROMTP,
         MSG_CREATE_GCS_BUCKET_PROJECT_ID_NO_PROJECTS_PROMPT,
         MSG_CREATE_GCS_BUCKET_PROJECT_ID_PROMPT, MSG_CREATE_GCS_BUCKET_PROMPT,
-        MSG_DOWNLOAD_SETUP_KEY_PROMPT, MSG_GETTING_PROOF_STORE_CONFIG,
-        MSG_GETTING_PUBLIC_STORE_CONFIG, MSG_PROOF_STORE_CONFIG_PROMPT, MSG_PROOF_STORE_DIR_PROMPT,
+        MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT, MSG_GETTING_PROOF_STORE_CONFIG,
+        MSG_GETTING_PUBLIC_STORE_CONFIG, MSG_INITIALIZE_BELLMAN_CUDA_PROMPT,
+        MSG_PROOF_STORE_CONFIG_PROMPT, MSG_PROOF_STORE_DIR_PROMPT,
         MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_ERR, MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_PROMPT,
         MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT, MSG_PROVER_DB_NAME_HELP,
-        MSG_PROVER_DB_URL_HELP, MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT, MSG_SETUP_KEY_PATH_PROMPT,
+        MSG_PROVER_DB_URL_HELP, MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT, MSG_SETUP_KEYS_PROMPT,
         MSG_USE_DEFAULT_DATABASES_HELP,
     },
 };
 
-#[derive(Debug, Clone, Serialize, Deserialize, Parser, Default)]
+#[derive(Debug, Clone, Parser, Default)]
 pub struct ProverInitArgs {
     // Proof store object
     #[clap(long)]
     pub proof_store_dir: Option<String>,
     #[clap(flatten)]
-    #[serde(flatten)]
     pub proof_store_gcs_config: ProofStorageGCSTmp,
     #[clap(flatten)]
-    #[serde(flatten)]
     pub create_gcs_bucket_config: ProofStorageGCSCreateBucketTmp,
 
     // Public store object
@@ -45,20 +47,25 @@ pub struct ProverInitArgs {
     #[clap(long)]
     pub public_store_dir: Option<String>,
     #[clap(flatten)]
-    #[serde(flatten)]
     pub public_store_gcs_config: PublicStorageGCSTmp,
     #[clap(flatten)]
-    #[serde(flatten)]
     pub public_create_gcs_bucket_config: PublicStorageGCSCreateBucketTmp,
 
     // Bellman cuda
     #[clap(flatten)]
-    #[serde(flatten)]
     pub bellman_cuda_config: InitBellmanCudaArgs,
+    #[clap(long, default_missing_value = "true", num_args = 0..=1)]
+    pub bellman_cuda: Option<bool>,
+
+    #[clap(long, default_missing_value = "true", num_args = 0..=1)]
"true", num_args = 0..=1)] + pub setup_compressor_keys: Option, + #[clap(flatten)] + pub compressor_keys_args: CompressorKeysArgs, #[clap(flatten)] - #[serde(flatten)] - pub setup_key_config: SetupKeyConfigTmp, + pub setup_keys_args: SetupKeysArgs, + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub setup_keys: Option, #[clap(long)] pub setup_database: Option, @@ -137,7 +144,7 @@ pub struct PublicStorageGCSCreateBucketTmp { } #[derive(Clone, Debug, Serialize, Deserialize, Parser, Default)] -pub struct SetupKeyConfigTmp { +pub struct SetupCompressorKeyConfigTmp { #[clap(long)] pub download_key: Option, #[clap(long)] @@ -171,12 +178,6 @@ pub enum ProofStorageConfig { GCSCreateBucket(ProofStorageGCSCreateBucket), } -#[derive(Debug, Clone)] -pub struct SetupKeyConfig { - pub download_key: bool, - pub setup_key_path: String, -} - #[derive(Debug, Clone)] pub struct ProverDatabaseConfig { pub database_config: DatabaseConfig, @@ -187,8 +188,9 @@ pub struct ProverDatabaseConfig { pub struct ProverInitArgsFinal { pub proof_store: ProofStorageConfig, pub public_store: Option, - pub setup_key_config: SetupKeyConfig, - pub bellman_cuda_config: InitBellmanCudaArgs, + pub compressor_key_args: Option, + pub setup_keys: Option, + pub bellman_cuda_config: Option, pub cloud_type: CloudConnectionMode, pub database_config: Option, } @@ -197,20 +199,23 @@ impl ProverInitArgs { pub(crate) fn fill_values_with_prompt( &self, shell: &Shell, - setup_key_path: &str, + default_compressor_key_path: &str, chain_config: &ChainConfig, ) -> anyhow::Result { let proof_store = self.fill_proof_storage_values_with_prompt(shell)?; let public_store = self.fill_public_storage_values_with_prompt(shell)?; - let setup_key_config = self.fill_setup_key_values_with_prompt(setup_key_path); - let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt()?; + let compressor_key_args = + self.fill_setup_compressor_key_values_with_prompt(default_compressor_key_path); + let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt(); let cloud_type = self.get_cloud_type_with_prompt(); let database_config = self.fill_database_values_with_prompt(chain_config); + let setup_keys = self.fill_setup_keys_values_with_prompt(); Ok(ProverInitArgsFinal { proof_store, public_store, - setup_key_config, + compressor_key_args, + setup_keys, bellman_cuda_config, cloud_type, database_config, @@ -336,29 +341,38 @@ impl ProverInitArgs { } } - fn fill_setup_key_values_with_prompt(&self, setup_key_path: &str) -> SetupKeyConfig { - let download_key = self - .clone() - .setup_key_config - .download_key - .unwrap_or_else(|| { - PromptConfirm::new(MSG_DOWNLOAD_SETUP_KEY_PROMPT) - .default(true) - .ask() - }); - let setup_key_path = self - .clone() - .setup_key_config - .setup_key_path - .unwrap_or_else(|| { - Prompt::new(MSG_SETUP_KEY_PATH_PROMPT) - .default(setup_key_path) - .ask() - }); + fn fill_setup_compressor_key_values_with_prompt( + &self, + default_path: &str, + ) -> Option { + let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| { + PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) + .default(false) + .ask() + }); - SetupKeyConfig { - download_key, - setup_key_path, + if download_key { + Some( + self.compressor_keys_args + .clone() + .fill_values_with_prompt(default_path), + ) + } else { + None + } + } + + fn fill_setup_keys_values_with_prompt(&self) -> Option { + let args = self.setup_keys_args.clone(); + + if self.setup_keys.unwrap_or_else(|| { + PromptConfirm::new(MSG_SETUP_KEYS_PROMPT) + 
@@ -137,7 +144,7 @@ pub struct PublicStorageGCSCreateBucketTmp {
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize, Parser, Default)]
-pub struct SetupKeyConfigTmp {
+pub struct SetupCompressorKeyConfigTmp {
     #[clap(long)]
     pub download_key: Option<bool>,
     #[clap(long)]
@@ -171,12 +178,6 @@ pub enum ProofStorageConfig {
     GCSCreateBucket(ProofStorageGCSCreateBucket),
 }
 
-#[derive(Debug, Clone)]
-pub struct SetupKeyConfig {
-    pub download_key: bool,
-    pub setup_key_path: String,
-}
-
 #[derive(Debug, Clone)]
 pub struct ProverDatabaseConfig {
     pub database_config: DatabaseConfig,
@@ -187,8 +188,9 @@
 pub struct ProverInitArgsFinal {
     pub proof_store: ProofStorageConfig,
     pub public_store: Option<ProofStorageConfig>,
-    pub setup_key_config: SetupKeyConfig,
-    pub bellman_cuda_config: InitBellmanCudaArgs,
+    pub compressor_key_args: Option<CompressorKeysArgs>,
+    pub setup_keys: Option<SetupKeysArgs>,
+    pub bellman_cuda_config: Option<InitBellmanCudaArgs>,
     pub cloud_type: CloudConnectionMode,
     pub database_config: Option<ProverDatabaseConfig>,
 }
@@ -197,20 +199,23 @@ impl ProverInitArgs {
     pub(crate) fn fill_values_with_prompt(
         &self,
         shell: &Shell,
-        setup_key_path: &str,
+        default_compressor_key_path: &str,
         chain_config: &ChainConfig,
     ) -> anyhow::Result<ProverInitArgsFinal> {
         let proof_store = self.fill_proof_storage_values_with_prompt(shell)?;
         let public_store = self.fill_public_storage_values_with_prompt(shell)?;
-        let setup_key_config = self.fill_setup_key_values_with_prompt(setup_key_path);
-        let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt()?;
+        let compressor_key_args =
+            self.fill_setup_compressor_key_values_with_prompt(default_compressor_key_path);
+        let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt();
         let cloud_type = self.get_cloud_type_with_prompt();
         let database_config = self.fill_database_values_with_prompt(chain_config);
+        let setup_keys = self.fill_setup_keys_values_with_prompt();
 
         Ok(ProverInitArgsFinal {
             proof_store,
             public_store,
-            setup_key_config,
+            compressor_key_args,
+            setup_keys,
             bellman_cuda_config,
             cloud_type,
             database_config,
@@ -336,29 +341,38 @@
         }
     }
 
-    fn fill_setup_key_values_with_prompt(&self, setup_key_path: &str) -> SetupKeyConfig {
-        let download_key = self
-            .clone()
-            .setup_key_config
-            .download_key
-            .unwrap_or_else(|| {
-                PromptConfirm::new(MSG_DOWNLOAD_SETUP_KEY_PROMPT)
-                    .default(true)
-                    .ask()
-            });
-        let setup_key_path = self
-            .clone()
-            .setup_key_config
-            .setup_key_path
-            .unwrap_or_else(|| {
-                Prompt::new(MSG_SETUP_KEY_PATH_PROMPT)
-                    .default(setup_key_path)
-                    .ask()
-            });
+    fn fill_setup_compressor_key_values_with_prompt(
+        &self,
+        default_path: &str,
+    ) -> Option<CompressorKeysArgs> {
+        let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| {
+            PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT)
+                .default(false)
+                .ask()
+        });
 
-        SetupKeyConfig {
-            download_key,
-            setup_key_path,
+        if download_key {
+            Some(
+                self.compressor_keys_args
+                    .clone()
+                    .fill_values_with_prompt(default_path),
+            )
+        } else {
+            None
+        }
+    }
+
+    fn fill_setup_keys_values_with_prompt(&self) -> Option<SetupKeysArgs> {
+        let args = self.setup_keys_args.clone();
+
+        if self.setup_keys.unwrap_or_else(|| {
+            PromptConfirm::new(MSG_SETUP_KEYS_PROMPT)
+                .default(false)
+                .ask()
+        }) {
+            Some(args)
+        } else {
+            None
         }
     }
 
@@ -460,8 +474,17 @@ impl ProverInitArgs {
         })
     }
 
-    fn fill_bellman_cuda_values_with_prompt(&self) -> anyhow::Result<InitBellmanCudaArgs> {
-        self.bellman_cuda_config.clone().fill_values_with_prompt()
+    fn fill_bellman_cuda_values_with_prompt(&self) -> Option<InitBellmanCudaArgs> {
+        let args = self.bellman_cuda_config.clone();
+        if self.bellman_cuda.unwrap_or_else(|| {
+            PromptConfirm::new(MSG_INITIALIZE_BELLMAN_CUDA_PROMPT)
+                .default(false)
+                .ask()
+        }) {
+            Some(args)
+        } else {
+            None
+        }
     }
 
     fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode {
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs
index 848457c53271..ba204b0be9e9 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs
@@ -30,7 +30,7 @@ impl std::fmt::Display for BellmanCudaPathSelection {
 }
 
 impl InitBellmanCudaArgs {
-    pub fn fill_values_with_prompt(self) -> anyhow::Result<InitBellmanCudaArgs> {
+    pub fn fill_values_with_prompt(self) -> InitBellmanCudaArgs {
         let bellman_cuda_dir = self.bellman_cuda_dir.unwrap_or_else(|| {
             match PromptSelect::new(
                 MSG_BELLMAN_CUDA_ORIGIN_SELECT,
@@ -43,8 +43,8 @@ impl InitBellmanCudaArgs {
             }
         });
 
-        Ok(InitBellmanCudaArgs {
+        InitBellmanCudaArgs {
             bellman_cuda_dir: Some(bellman_cuda_dir),
-        })
+        }
     }
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs
index 66d97d75094c..39391977b843 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs
@@ -1,3 +1,5 @@
+pub mod compressor_keys;
 pub mod init;
 pub mod init_bellman_cuda;
 pub mod run;
+pub mod setup_keys;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs
index 6bdd62c1d488..751cc48074fe 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs
@@ -1,8 +1,22 @@
+use anyhow::anyhow;
 use clap::{Parser, ValueEnum};
 use common::{Prompt, PromptSelect};
+use config::ChainConfig;
 use strum::{EnumIter, IntoEnumIterator};
 
-use crate::messages::{MSG_ROUND_SELECT_PROMPT, MSG_RUN_COMPONENT_PROMPT, MSG_THREADS_PROMPT};
+use crate::{
+    consts::{
+        COMPRESSOR_BINARY_NAME, COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE,
+        PROVER_GATEWAY_BINARY_NAME, PROVER_GATEWAY_DOCKER_IMAGE, PROVER_JOB_MONITOR_BINARY_NAME,
+        PROVER_JOB_MONITOR_DOCKER_IMAGE, WITNESS_GENERATOR_BINARY_NAME,
+        WITNESS_GENERATOR_DOCKER_IMAGE, WITNESS_VECTOR_GENERATOR_BINARY_NAME,
+        WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE,
+    },
+    messages::{
+        MSG_ROUND_SELECT_PROMPT, MSG_RUN_COMPONENT_PROMPT, MSG_THREADS_PROMPT,
+        MSG_WITNESS_GENERATOR_ROUND_ERR,
+    },
+};
 
 #[derive(Debug, Clone, Parser, Default)]
 pub struct ProverRunArgs {
@@ -12,6 +26,10 @@ pub struct ProverRunArgs {
     pub witness_generator_args: WitnessGeneratorArgs,
     #[clap(flatten)]
     pub witness_vector_generator_args: WitnessVectorGeneratorArgs,
+    #[clap(flatten)]
+    pub fri_prover_args: FriProverRunArgs,
+    #[clap(long)]
+    pub docker: Option<bool>,
 }
 
 #[derive(
@@ -32,6 +50,108 @@ pub enum ProverComponent {
     ProverJobMonitor,
 }
 
+impl ProverComponent {
+    pub fn image_name(&self) -> &'static str {
+        match self {
+            Self::Gateway => PROVER_GATEWAY_DOCKER_IMAGE,
+            Self::WitnessGenerator => WITNESS_GENERATOR_DOCKER_IMAGE,
+            Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE,
+            Self::Prover => PROVER_DOCKER_IMAGE,
+            Self::Compressor => COMPRESSOR_DOCKER_IMAGE,
+            Self::ProverJobMonitor => PROVER_JOB_MONITOR_DOCKER_IMAGE,
+        }
+    }
+
+    pub fn binary_name(&self) -> &'static str {
+        match self {
+            Self::Gateway => PROVER_GATEWAY_BINARY_NAME,
+            Self::WitnessGenerator => WITNESS_GENERATOR_BINARY_NAME,
+            Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_BINARY_NAME,
+            Self::Prover => PROVER_BINARY_NAME,
+            Self::Compressor => COMPRESSOR_BINARY_NAME,
+            Self::ProverJobMonitor => PROVER_JOB_MONITOR_BINARY_NAME,
+        }
+    }
+
+    pub fn get_application_args(&self, in_docker: bool) -> anyhow::Result<Vec<String>> {
+        let mut application_args = vec![];
+
+        if self == &Self::Prover || self == &Self::Compressor {
+            if in_docker {
+                application_args.push("--gpus=all".to_string());
+            } else {
+                application_args.push("--features=gpu".to_string());
+            }
+        }
+
+        Ok(application_args)
+    }
+
+    pub fn get_additional_args(
+        &self,
+        in_docker: bool,
+        args: ProverRunArgs,
+        chain: &ChainConfig,
+    ) -> anyhow::Result<Vec<String>> {
+        let mut additional_args = vec![];
+        if in_docker {
+            additional_args.push("--config-path=/configs/general.yaml".to_string());
+            additional_args.push("--secrets-path=/configs/secrets.yaml".to_string());
+        } else {
+            let general_config = chain
+                .path_to_general_config()
+                .into_os_string()
+                .into_string()
+                .map_err(|_| anyhow!("Failed to convert path to string"))?;
+            let secrets_config = chain
+                .path_to_secrets_config()
+                .into_os_string()
+                .into_string()
+                .map_err(|_| anyhow!("Failed to convert path to string"))?;
+
+            additional_args.push(format!("--config-path={}", general_config));
+            additional_args.push(format!("--secrets-path={}", secrets_config));
+        }
+
+        match self {
+            Self::WitnessGenerator => {
+                additional_args.push(
+                    match args
+                        .witness_generator_args
+                        .round
+                        .expect(MSG_WITNESS_GENERATOR_ROUND_ERR)
+                    {
+                        WitnessGeneratorRound::AllRounds => "--all_rounds",
+                        WitnessGeneratorRound::BasicCircuits => "--round=basic_circuits",
+                        WitnessGeneratorRound::LeafAggregation => "--round=leaf_aggregation",
+                        WitnessGeneratorRound::NodeAggregation => "--round=node_aggregation",
+                        WitnessGeneratorRound::RecursionTip => "--round=recursion_tip",
+                        WitnessGeneratorRound::Scheduler => "--round=scheduler",
+                    }
+                    .to_string(),
+                );
+            }
+            Self::WitnessVectorGenerator => {
+                additional_args.push(format!(
+                    "--threads={}",
+                    args.witness_vector_generator_args.threads.unwrap_or(1)
+                ));
+            }
+            Self::Prover => {
+                if args.fri_prover_args.max_allocation.is_some() {
+                    additional_args.push(format!(
+                        "--max-allocation={}",
+                        args.fri_prover_args.max_allocation.unwrap()
+                    ));
+                };
+            }
+            _ => {}
+        };
+
+        Ok(additional_args)
+    }
+}
+
 #[derive(Debug, Clone, Parser, Default)]
 pub struct WitnessGeneratorArgs {
     #[clap(long)]
@@ -76,8 +196,15 @@ impl WitnessVectorGeneratorArgs {
     }
 }
 
+#[derive(Debug, Clone, Parser, Default)]
+pub struct FriProverRunArgs {
+    /// Memory allocation limit in bytes (for prover component)
+    #[clap(long)]
+    pub max_allocation: Option<usize>,
+}
+
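
Combining get_application_args and get_additional_args with the cargo invocation used by run_binary_component further down in this diff, a local (non-docker) witness-generator run should boil down to something like this (paths are placeholders):

    cargo run --release --bin zksync_witness_generator -- \
        --config-path=<chain>/configs/general.yaml \
        --secrets-path=<chain>/configs/secrets.yaml \
        --round=basic_circuits
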
 impl ProverRunArgs {
-    pub fn fill_values_with_prompt(&self) -> anyhow::Result<ProverRunArgs> {
+    pub fn fill_values_with_prompt(self) -> anyhow::Result<ProverRunArgs> {
         let component = self.component.unwrap_or_else(|| {
             PromptSelect::new(MSG_RUN_COMPONENT_PROMPT, ProverComponent::iter()).ask()
         });
@@ -90,10 +217,18 @@
             .witness_vector_generator_args
             .fill_values_with_prompt(component)?;
 
+        let docker = self.docker.unwrap_or_else(|| {
+            Prompt::new("Do you want to run the Docker image for the component?")
+                .default("false")
+                .ask()
+        });
+
         Ok(ProverRunArgs {
             component: Some(component),
             witness_generator_args,
             witness_vector_generator_args,
+            fri_prover_args: self.fri_prover_args,
+            docker: Some(docker),
         })
     }
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs
new file mode 100644
index 000000000000..155977b8812a
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs
@@ -0,0 +1,53 @@
+use clap::{Parser, ValueEnum};
+use common::PromptSelect;
+use strum::{EnumIter, IntoEnumIterator};
+
+use crate::messages::{MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT, MSG_SETUP_KEYS_REGION_PROMPT};
+
+#[derive(Debug, Clone, Parser, Default)]
+pub struct SetupKeysArgs {
+    #[clap(long)]
+    pub region: Option<Region>,
+    #[clap(long)]
+    pub mode: Option<Mode>,
+}
+
+#[derive(Debug, Clone)]
+pub struct SetupKeysArgsFinal {
+    pub region: Option<Region>,
+    pub mode: Mode,
+}
+
+#[derive(Debug, Clone, ValueEnum, strum::EnumString, EnumIter, PartialEq, Eq, strum::Display)]
+pub enum Mode {
+    Download,
+    Generate,
+}
+
+#[derive(Debug, Clone, ValueEnum, strum::EnumString, EnumIter, PartialEq, Eq, strum::Display)]
+pub enum Region {
+    Us,
+    Europe,
+    Asia,
+}
+
+impl SetupKeysArgs {
+    pub fn fill_values_with_prompt(self) -> SetupKeysArgsFinal {
+        let mode = self.mode.unwrap_or_else(|| {
+            PromptSelect::new(MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT, Mode::iter()).ask()
+        });
+
+        if mode == Mode::Download {
+            let region = self.region.unwrap_or_else(|| {
+                PromptSelect::new(MSG_SETUP_KEYS_REGION_PROMPT, Region::iter()).ask()
+            });
+
+            SetupKeysArgsFinal {
+                region: Some(region),
+                mode,
+            }
+        } else {
+            SetupKeysArgsFinal { region: None, mode }
+        }
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs
new file mode 100644
index 000000000000..fd83fccfebfa
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs
@@ -0,0 +1,73 @@
+use anyhow::Context;
+use common::{
+    check_prerequisites, cmd::Cmd, config::global_config, spinner::Spinner, WGET_PREREQUISITE,
+};
+use config::{get_link_to_prover, EcosystemConfig, GeneralConfig};
+use xshell::{cmd, Shell};
+
+use super::args::compressor_keys::CompressorKeysArgs;
+use crate::messages::{
+    MSG_CHAIN_NOT_FOUND_ERR, MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER,
+    MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, MSG_SETUP_KEY_PATH_ERROR,
+};
+
+pub(crate) async fn run(shell: &Shell, args: CompressorKeysArgs) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
+    let mut general_config = chain_config.get_general_config()?;
+
+    let default_path = get_default_compressor_keys_path(&ecosystem_config)?;
+    let args = args.fill_values_with_prompt(&default_path);
+
+    download_compressor_key(
+        shell,
+        &mut general_config,
+        &args.path.context(MSG_SETUP_KEY_PATH_ERROR)?,
+    )?;
+
+    chain_config.save_general_config(&general_config)?;
+
+    Ok(())
+}
+
+pub(crate) fn download_compressor_key(
+    shell: &Shell,
+    general_config: &mut GeneralConfig,
+    path: &str,
+) -> anyhow::Result<()> {
+    check_prerequisites(shell, &WGET_PREREQUISITE, false);
+    let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER);
+    let mut compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config
+        .proof_compressor_config
+        .as_ref()
+        .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR)
+        .clone();
+    compressor_config.universal_setup_path = path.to_string();
+    general_config.proof_compressor_config = Some(compressor_config.clone());
+
+    let url = compressor_config.universal_setup_download_url;
+    let path = std::path::Path::new(path);
+    let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR);
+    let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR);
+
+    Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?;
+
+    if file_name != "setup_2^24.key" {
+        Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?;
+    }
+
+    spinner.finish();
+    Ok(())
+}
+
+pub fn get_default_compressor_keys_path(
+    ecosystem_config: &EcosystemConfig,
+) -> anyhow::Result<String> {
+    let link_to_prover = get_link_to_prover(ecosystem_config);
+    let path = link_to_prover.join("keys/setup/setup_2^24.key");
+    let string = path.to_str().unwrap();
+
+    Ok(String::from(string))
+}
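
The helper first records the chosen path in the proof compressor config, then fetches the key with wget into the parent directory and renames only when the requested file name differs from the upstream default. Schematically, for a custom target path (URL and paths illustrative):

    wget <universal_setup_download_url> -P /custom/keys
    # performed only if the target is not already named setup_2^24.key:
    mv /custom/keys/setup_2^24.key /custom/keys/my_compressor.key
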
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs
index 0c76cb10f542..f28c44504b56 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs
@@ -1,4 +1,4 @@
-use common::{cmd::Cmd, logger, spinner::Spinner};
+use common::{check_prerequisites, cmd::Cmd, logger, spinner::Spinner, GCLOUD_PREREQUISITE};
 use xshell::{cmd, Shell};
 use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig};
 
@@ -14,6 +14,8 @@ pub(crate) fn create_gcs_bucket(
     shell: &Shell,
     config: ProofStorageGCSCreateBucket,
 ) -> anyhow::Result<ObjectStoreConfig> {
+    check_prerequisites(shell, &GCLOUD_PREREQUISITE, false);
+
     let bucket_name = config.bucket_name;
     let location = config.location;
     let project_id = config.project_id;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs
deleted file mode 100644
index 7f678470d178..000000000000
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-use anyhow::Ok;
-use common::{check_prover_prequisites, cmd::Cmd, logger, spinner::Spinner};
-use config::EcosystemConfig;
-use xshell::{cmd, Shell};
-
-use super::utils::get_link_to_prover;
-use crate::messages::{MSG_GENERATING_SK_SPINNER, MSG_SK_GENERATED};
-
-pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> {
-    check_prover_prequisites(shell);
-
-    let ecosystem_config = EcosystemConfig::from_file(shell)?;
-    let link_to_prover = get_link_to_prover(&ecosystem_config);
-    shell.change_dir(&link_to_prover);
-
-    let spinner = Spinner::new(MSG_GENERATING_SK_SPINNER);
-    let cmd = Cmd::new(cmd!(
-        shell,
-        "cargo run --features gpu --release --bin key_generator --
-        generate-sk-gpu all --recompute-if-missing
-        --setup-path=crates/bin/vk_setup_data_generator_server_fri/data
-        --path={link_to_prover}/crates/bin/vk_setup_data_generator_server_fri/data"
-    ));
-    cmd.run()?;
-    spinner.finish();
-    logger::outro(MSG_SK_GENERATED);
-
-    Ok(())
-}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs
index 803ef56df832..20e682745870 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs
@@ -2,48 +2,46 @@ use std::path::PathBuf;
 
 use anyhow::Context;
 use common::{
-    check_prover_prequisites,
     cmd::Cmd,
     config::global_config,
     db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig},
     logger,
     spinner::Spinner,
 };
-use config::{copy_configs, set_prover_database, traits::SaveConfigWithBasePath, EcosystemConfig};
-use xshell::{cmd, Shell};
-use zksync_config::{
-    configs::{object_store::ObjectStoreMode, GeneralConfig},
-    ObjectStoreConfig,
+use config::{
+    copy_configs, get_link_to_prover, set_prover_database, traits::SaveConfigWithBasePath,
+    EcosystemConfig,
 };
+use xshell::{cmd, Shell};
+use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig};
 
 use super::{
     args::init::{ProofStorageConfig, ProverInitArgs},
+    compressor_keys::{download_compressor_key, get_default_compressor_keys_path},
     gcs::create_gcs_bucket,
     init_bellman_cuda::run as init_bellman_cuda,
-    utils::get_link_to_prover,
+    setup_keys,
 };
 use crate::{
+    commands::prover::args::init::ProofStorageFileBacked,
     consts::{PROVER_MIGRATIONS, PROVER_STORE_MAX_RETRIES},
     messages::{
-        MSG_CHAIN_NOT_FOUND_ERR, MSG_DOWNLOADING_SETUP_KEY_SPINNER,
-        MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, MSG_GENERAL_CONFIG_NOT_FOUND_ERR,
-        MSG_INITIALIZING_DATABASES_SPINNER, MSG_INITIALIZING_PROVER_DATABASE,
-        MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, MSG_PROVER_CONFIG_NOT_FOUND_ERR,
-        MSG_PROVER_INITIALIZED, MSG_SETUP_KEY_PATH_ERROR,
+        MSG_CHAIN_NOT_FOUND_ERR, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR,
+        MSG_GENERAL_CONFIG_NOT_FOUND_ERR, MSG_INITIALIZING_DATABASES_SPINNER,
+        MSG_INITIALIZING_PROVER_DATABASE, MSG_PROVER_CONFIG_NOT_FOUND_ERR, MSG_PROVER_INITIALIZED,
+        MSG_SETUP_KEY_PATH_ERROR,
     },
 };
 
 pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> {
-    check_prover_prequisites(shell);
-
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
-    let setup_key_path = get_default_setup_key_path(&ecosystem_config)?;
+    let default_compressor_key_path = get_default_compressor_keys_path(&ecosystem_config)?;
 
     let chain_config = ecosystem_config
-        .load_chain(Some(ecosystem_config.default_chain.clone()))
+        .load_chain(global_config().chain_name.clone())
        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
-    let args = args.fill_values_with_prompt(shell, &setup_key_path, &chain_config)?;
+    let args = args.fill_values_with_prompt(shell, &default_compressor_key_path, &chain_config)?;
 
     if chain_config.get_general_config().is_err() || chain_config.get_secrets_config().is_err() {
         copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?;
@@ -56,12 +54,13 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<(
     let proof_object_store_config = get_object_store_config(shell, Some(args.proof_store))?;
     let public_object_store_config = get_object_store_config(shell, args.public_store)?;
 
-    if args.setup_key_config.download_key {
-        download_setup_key(
-            shell,
-            &general_config,
-            &args.setup_key_config.setup_key_path,
-        )?;
+    if let Some(args) = args.compressor_key_args {
+        let path = args.path.context(MSG_SETUP_KEY_PATH_ERROR)?;
+        download_compressor_key(shell, &mut general_config, &path)?;
+    }
+
+    if let Some(args) = args.setup_keys {
+        setup_keys::run(args, shell).await?;
     }
 
     let mut prover_config = general_config
@@ -79,15 +78,11 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<(
     prover_config.cloud_type = args.cloud_type;
     general_config.prover_config = Some(prover_config);
 
-    let mut proof_compressor_config = general_config
-        .proof_compressor_config
-        .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR);
-    proof_compressor_config.universal_setup_path = args.setup_key_config.setup_key_path;
-    general_config.proof_compressor_config = Some(proof_compressor_config);
-
     chain_config.save_general_config(&general_config)?;
 
-    init_bellman_cuda(shell, args.bellman_cuda_config).await?;
+    if let Some(args) = args.bellman_cuda_config {
+        init_bellman_cuda(shell, args).await?;
+    }
 
     if let Some(prover_db) = &args.database_config {
         let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER);
@@ -110,52 +105,16 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<(
     Ok(())
 }
 
-fn download_setup_key(
-    shell: &Shell,
-    general_config: &GeneralConfig,
-    path: &str,
-) -> anyhow::Result<()> {
-    let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_KEY_SPINNER);
-    let compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config
-        .proof_compressor_config
-        .as_ref()
-        .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR)
-        .clone();
-    let url = compressor_config.universal_setup_download_url;
-    let path = std::path::Path::new(path);
-    let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR);
-    let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR);
-
-    Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?;
-
-    if file_name != "setup_2^24.key" {
-        Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?;
-    }
-
-    spinner.finish();
-    Ok(())
-}
-
-fn get_default_setup_key_path(ecosystem_config: &EcosystemConfig) -> anyhow::Result<String> {
-    let link_to_prover = get_link_to_prover(ecosystem_config);
-    let path = link_to_prover.join("keys/setup/setup_2^24.key");
-    let string = path.to_str().unwrap();
-
-    Ok(String::from(string))
-}
-
 fn get_object_store_config(
     shell: &Shell,
     config: Option<ProofStorageConfig>,
 ) -> anyhow::Result<Option<ObjectStoreConfig>> {
     let object_store = match config {
-        Some(ProofStorageConfig::FileBacked(config)) => Some(ObjectStoreConfig {
-            mode: ObjectStoreMode::FileBacked {
-                file_backed_base_path: config.proof_store_dir,
-            },
-            max_retries: PROVER_STORE_MAX_RETRIES,
-            local_mirror_path: None,
-        }),
+        Some(ProofStorageConfig::FileBacked(config)) => Some(init_file_backed_proof_storage(
+            shell,
+            &EcosystemConfig::from_file(shell)?,
+            config,
+        )?),
         Some(ProofStorageConfig::GCS(config)) => Some(ObjectStoreConfig {
             mode: ObjectStoreMode::GCSWithCredentialFile {
                 bucket_base_url: config.bucket_base_url,
@@ -198,3 +157,27 @@ async fn initialize_prover_database(
     Ok(())
 }
+
+fn init_file_backed_proof_storage(
+    shell: &Shell,
+    ecosystem_config: &EcosystemConfig,
+    config: ProofStorageFileBacked,
+) -> anyhow::Result<ObjectStoreConfig> {
+    let proof_store_dir = config.proof_store_dir.clone();
+    let prover_path = get_link_to_prover(ecosystem_config);
+
+    let proof_store_dir = prover_path.join(proof_store_dir).join("witness_inputs");
+
+    let cmd = Cmd::new(cmd!(shell, "mkdir -p {proof_store_dir}"));
+    cmd.run()?;
+
+    let object_store_config = ObjectStoreConfig {
+        mode: ObjectStoreMode::FileBacked {
+            file_backed_base_path: config.proof_store_dir,
+        },
+        max_retries: PROVER_STORE_MAX_RETRIES,
+        local_mirror_path: None,
+    };
+
+    Ok(object_store_config)
+}
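
Worth noting in init_file_backed_proof_storage: the witness_inputs directory is pre-created under the prover tree, while the returned ObjectStoreConfig still records the original, un-joined directory. Roughly, under a standard layout:

    // Given proof_store_dir = "artifacts" and link_to_code = /home/user/zksync-era:
    // created via mkdir -p:    /home/user/zksync-era/prover/artifacts/witness_inputs
    // file_backed_base_path =  "artifacts" (the original, un-joined value)
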
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs
index 75535587c42c..615ef841488b 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs
@@ -1,5 +1,5 @@
 use anyhow::Context;
-use common::{check_prover_prequisites, cmd::Cmd, git, logger, spinner::Spinner};
+use common::{check_prerequisites, cmd::Cmd, git, logger, spinner::Spinner, GPU_PREREQUISITES};
 use config::{traits::SaveConfigWithBasePath, EcosystemConfig};
 use xshell::{cmd, Shell};
 
@@ -13,11 +13,11 @@ use crate::{
 };
 
 pub(crate) async fn run(shell: &Shell, args: InitBellmanCudaArgs) -> anyhow::Result<()> {
-    check_prover_prequisites(shell);
+    check_prerequisites(shell, &GPU_PREREQUISITES, false);
 
     let mut ecosystem_config = EcosystemConfig::from_file(shell)?;
 
-    let args = args.fill_values_with_prompt()?;
+    let args = args.fill_values_with_prompt();
 
     let bellman_cuda_dir = args.bellman_cuda_dir.unwrap_or("".to_string());
     let bellman_cuda_dir = if bellman_cuda_dir.is_empty() {
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs
index 31c3a02e3806..d9e443cdae0d 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs
@@ -1,14 +1,19 @@
-use args::{init::ProverInitArgs, init_bellman_cuda::InitBellmanCudaArgs, run::ProverRunArgs};
+use args::{
+    compressor_keys::CompressorKeysArgs, init::ProverInitArgs,
+    init_bellman_cuda::InitBellmanCudaArgs, run::ProverRunArgs,
+};
 use clap::Subcommand;
 use xshell::Shell;
 
+use crate::commands::prover::args::setup_keys::SetupKeysArgs;
+
 mod args;
+mod compressor_keys;
 mod gcs;
-mod generate_sk;
 mod init;
 mod init_bellman_cuda;
 mod run;
-mod utils;
+mod setup_keys;
 
 #[derive(Subcommand, Debug)]
 pub enum ProverCommands {
@@ -16,19 +21,23 @@ pub enum ProverCommands {
     Init(Box<ProverInitArgs>),
     /// Generate setup keys
     #[command(alias = "sk")]
-    GenerateSK,
+    SetupKeys(SetupKeysArgs),
     /// Run prover
     Run(ProverRunArgs),
     /// Initialize bellman-cuda
     #[command(alias = "cuda")]
     InitBellmanCuda(Box<InitBellmanCudaArgs>),
+    /// Download compressor keys
+    #[command(alias = "ck")]
+    CompressorKeys(CompressorKeysArgs),
 }
 
 pub(crate) async fn run(shell: &Shell, args: ProverCommands) -> anyhow::Result<()> {
     match args {
         ProverCommands::Init(args) => init::run(*args, shell).await,
-        ProverCommands::GenerateSK => generate_sk::run(shell).await,
+        ProverCommands::SetupKeys(args) => setup_keys::run(args, shell).await,
         ProverCommands::Run(args) => run::run(args, shell).await,
         ProverCommands::InitBellmanCuda(args) => init_bellman_cuda::run(shell, *args).await,
+        ProverCommands::CompressorKeys(args) => compressor_keys::run(shell, args).await,
     }
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs
index 056723836662..5f4bf2f4a671 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs
@@ -1,141 +1,147 @@
-use anyhow::Context;
-use common::{check_prover_prequisites, cmd::Cmd, logger};
-use config::{ChainConfig, EcosystemConfig};
+use std::path::PathBuf;
+
+use anyhow::{anyhow, Context};
+use common::{check_prerequisites, cmd::Cmd, config::global_config, logger, GPU_PREREQUISITES};
+use config::{get_link_to_prover, ChainConfig, EcosystemConfig};
 use xshell::{cmd, Shell};
 
-use super::{
-    args::run::{
-        ProverComponent, ProverRunArgs, WitnessGeneratorArgs, WitnessGeneratorRound,
-        WitnessVectorGeneratorArgs,
-    },
-    utils::get_link_to_prover,
-};
+use super::args::run::{ProverComponent, ProverRunArgs};
 use crate::messages::{
     MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR,
     MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR,
     MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR,
-    MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR,
-    MSG_RUNNING_WITNESS_VECTOR_GENERATOR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR,
-    MSG_WITNESS_GENERATOR_ROUND_ERR,
+    MSG_RUNNING_PROVER_JOB_MONITOR_ERR, MSG_RUNNING_WITNESS_GENERATOR,
+    MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR,
+    MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR,
 };
 
 pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<()> {
-    check_prover_prequisites(shell);
     let args = args.fill_values_with_prompt()?;
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     let chain = ecosystem_config
-        .load_chain(Some(ecosystem_config.default_chain.clone()))
+        .load_chain(global_config().chain_name.clone())
         .expect(MSG_CHAIN_NOT_FOUND_ERR);
 
     let link_to_prover = get_link_to_prover(&ecosystem_config);
     shell.change_dir(link_to_prover.clone());
 
-    match args.component {
-        Some(ProverComponent::Gateway) => run_gateway(shell, &chain)?,
-        Some(ProverComponent::WitnessGenerator) => {
-            run_witness_generator(shell, &chain, args.witness_generator_args)?
+    let component = args.component.context(anyhow!(MSG_MISSING_COMPONENT_ERR))?;
+    let in_docker = args.docker.unwrap_or(false);
+
+    let application_args = component.get_application_args(in_docker)?;
+    let additional_args = component.get_additional_args(in_docker, args, &chain)?;
+
+    let (message, error) = match component {
+        ProverComponent::WitnessGenerator => (
+            MSG_RUNNING_WITNESS_GENERATOR,
+            MSG_RUNNING_WITNESS_GENERATOR_ERR,
+        ),
+        ProverComponent::WitnessVectorGenerator => (
+            MSG_RUNNING_WITNESS_VECTOR_GENERATOR,
+            MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR,
+        ),
+        ProverComponent::Prover => {
+            if !in_docker {
+                check_prerequisites(shell, &GPU_PREREQUISITES, false);
+            }
+            (MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR)
         }
-        Some(ProverComponent::WitnessVectorGenerator) => {
-            run_witness_vector_generator(shell, &chain, args.witness_vector_generator_args)?
+        ProverComponent::Compressor => {
+            if !in_docker {
+                check_prerequisites(shell, &GPU_PREREQUISITES, false);
+                shell.set_var(
+                    "BELLMAN_CUDA_DIR",
+                    ecosystem_config
+                        .bellman_cuda_dir
+                        .clone()
+                        .expect(MSG_BELLMAN_CUDA_DIR_ERR),
+                );
+            }
+            (MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR)
         }
-        Some(ProverComponent::Prover) => run_prover(shell, &chain)?,
-        Some(ProverComponent::Compressor) => run_compressor(shell, &chain, &ecosystem_config)?,
-        Some(ProverComponent::ProverJobMonitor) => run_prover_job_monitor(shell, &chain)?,
-        None => anyhow::bail!(MSG_MISSING_COMPONENT_ERR),
+        ProverComponent::ProverJobMonitor => (
+            MSG_RUNNING_PROVER_JOB_MONITOR,
+            MSG_RUNNING_PROVER_JOB_MONITOR_ERR,
+        ),
+        ProverComponent::Gateway => (MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR),
+    };
+
+    if in_docker {
+        let path_to_configs = chain.configs.clone();
+        let path_to_prover = get_link_to_prover(&ecosystem_config);
+        update_setup_data_path(&chain, "prover/data/keys".to_string())?;
+        run_dockerized_component(
+            shell,
+            component.image_name(),
+            &application_args,
+            &additional_args,
+            message,
+            error,
+            &path_to_configs,
+            &path_to_prover,
+        )?
+    } else {
+        update_setup_data_path(&chain, "data/keys".to_string())?;
+        run_binary_component(
+            shell,
+            component.binary_name(),
+            &application_args,
+            &additional_args,
+            message,
+            error,
+        )?
     }
 
     Ok(())
 }
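
For a dockerized prover, run_dockerized_component below mounts the setup keys, artifacts and chain configs into fixed container paths, so the assembled invocation should look roughly like this (host paths are placeholders; image and tag come from the consts added later in this diff):

    docker run --net=host \
        -v <prover>/data/keys:/prover/data/keys \
        -v <prover>/artifacts:/artifacts \
        -v <chain_configs>:/configs \
        --gpus=all matterlabs/prover-gpu-fri:latest2.0 \
        --config-path=/configs/general.yaml --secrets-path=/configs/secrets.yaml
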
 
-fn run_gateway(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> {
-    logger::info(MSG_RUNNING_PROVER_GATEWAY);
-    let config_path = chain.path_to_general_config();
-    let secrets_path = chain.path_to_secrets_config();
-
-    let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_fri_gateway -- --config-path={config_path} --secrets-path={secrets_path}"));
-    cmd = cmd.with_force_run();
-    cmd.run().context(MSG_RUNNING_PROVER_GATEWAY_ERR)
-}
-
-fn run_witness_generator(
+#[allow(clippy::too_many_arguments)]
+fn run_dockerized_component(
     shell: &Shell,
-    chain: &ChainConfig,
-    args: WitnessGeneratorArgs,
+    image_name: &str,
+    application_args: &[String],
+    args: &[String],
+    message: &'static str,
+    error: &'static str,
+    path_to_configs: &PathBuf,
+    path_to_prover: &PathBuf,
 ) -> anyhow::Result<()> {
-    logger::info(MSG_RUNNING_WITNESS_GENERATOR);
-    let config_path = chain.path_to_general_config();
-    let secrets_path = chain.path_to_secrets_config();
-    let round = args.round.expect(MSG_WITNESS_GENERATOR_ROUND_ERR);
+    logger::info(message);
 
-    let round_str = match round {
-        WitnessGeneratorRound::AllRounds => "--all_rounds",
-        WitnessGeneratorRound::BasicCircuits => "--round=basic_circuits",
-        WitnessGeneratorRound::LeafAggregation => "--round=leaf_aggregation",
-        WitnessGeneratorRound::NodeAggregation => "--round=node_aggregation",
-        WitnessGeneratorRound::RecursionTip => "--round=recursion_tip",
-        WitnessGeneratorRound::Scheduler => "--round=scheduler",
-    };
+    let mut cmd = Cmd::new(cmd!(
+        shell,
+        "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}"
+    ));
 
-    let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_generator -- {round_str} --config-path={config_path} --secrets-path={secrets_path}"));
     cmd = cmd.with_force_run();
-    cmd.run().context(MSG_RUNNING_WITNESS_GENERATOR_ERR)
+    cmd.run().context(error)
 }
 
-fn run_witness_vector_generator(
+fn run_binary_component(
     shell: &Shell,
-    chain: &ChainConfig,
-    args: WitnessVectorGeneratorArgs,
+    binary_name: &str,
+    application_args: &[String],
+    args: &[String],
+    message: &'static str,
+    error: &'static str,
 ) -> anyhow::Result<()> {
-    logger::info(MSG_RUNNING_WITNESS_VECTOR_GENERATOR);
-    let config_path = chain.path_to_general_config();
-    let secrets_path = chain.path_to_secrets_config();
+    logger::info(message);
 
-    let threads = args.threads.unwrap_or(1).to_string();
-    let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_vector_generator -- --config-path={config_path} --secrets-path={secrets_path} --threads={threads}"));
+    let mut cmd = Cmd::new(cmd!(
+        shell,
+        "cargo run {application_args...} --release --bin {binary_name} -- {args...}"
+    ));
     cmd = cmd.with_force_run();
-    cmd.run().context(MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR)
+    cmd.run().context(error)
 }
 
-fn run_prover(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> {
-    logger::info(MSG_RUNNING_PROVER);
-    let config_path = chain.path_to_general_config();
-    let secrets_path = chain.path_to_secrets_config();
-
-    let mut cmd = Cmd::new(
-        cmd!(shell, "cargo run --features gpu --release --bin zksync_prover_fri -- --config-path={config_path} --secrets-path={secrets_path}"),
-    );
-    cmd = cmd.with_force_run();
-    cmd.run().context(MSG_RUNNING_PROVER_ERR)
-}
-
-fn run_compressor(
-    shell: &Shell,
-    chain: &ChainConfig,
-    ecosystem: &EcosystemConfig,
-) -> anyhow::Result<()> {
-    logger::info(MSG_RUNNING_COMPRESSOR);
-    let config_path = chain.path_to_general_config();
-    let secrets_path = chain.path_to_secrets_config();
-
-    shell.set_var(
-        "BELLMAN_CUDA_DIR",
-        ecosystem
-            .bellman_cuda_dir
-            .clone()
-            .expect(MSG_BELLMAN_CUDA_DIR_ERR),
-    );
-
-    let mut cmd = Cmd::new(cmd!(shell, "cargo run --features gpu --release --bin zksync_proof_fri_compressor -- --config-path={config_path} --secrets-path={secrets_path}"));
-    cmd = cmd.with_force_run();
-    cmd.run().context(MSG_RUNNING_COMPRESSOR_ERR)
-}
-
-fn run_prover_job_monitor(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> {
-    logger::info(MSG_RUNNING_PROVER_JOB_MONITOR);
-    let config_path = chain.path_to_general_config();
-    let secrets_path = chain.path_to_secrets_config();
-
-    let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_job_monitor -- --config-path={config_path} --secrets-path={secrets_path}"));
-    cmd = cmd.with_force_run();
-    cmd.run().context(MSG_RUNNING_PROVER_JOB_MONITOR)
+fn update_setup_data_path(chain: &ChainConfig, path: String) -> anyhow::Result<()> {
+    let mut general_config = chain.get_general_config()?;
+    general_config
+        .prover_config
+        .as_mut()
+        .expect("Prover config not found")
+        .setup_data_path = path;
+    chain.save_general_config(&general_config)?;
+    Ok(())
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs
new file mode 100644
index 000000000000..ae0480e872dd
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs
@@ -0,0 +1,81 @@
+use anyhow::Ok;
+use common::{
+    check_prerequisites, cmd::Cmd, logger, spinner::Spinner, GCLOUD_PREREQUISITE, GPU_PREREQUISITES,
+};
+use config::{get_link_to_prover, EcosystemConfig};
+use xshell::{cmd, Shell};
+
+use crate::{
+    commands::prover::args::setup_keys::{Mode, Region, SetupKeysArgs},
+    messages::{MSG_GENERATING_SK_SPINNER, MSG_SK_GENERATED},
+};
+
+pub(crate) async fn run(args: SetupKeysArgs, shell: &Shell) -> anyhow::Result<()> {
+    let args = args.fill_values_with_prompt();
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    if args.mode == Mode::Generate {
+        check_prerequisites(shell, &GPU_PREREQUISITES, false);
+        let link_to_prover = get_link_to_prover(&ecosystem_config);
+        shell.change_dir(&link_to_prover);
+
+        let spinner = Spinner::new(MSG_GENERATING_SK_SPINNER);
+        let cmd = Cmd::new(cmd!(
+            shell,
+            "cargo run --features gpu --release --bin key_generator --
+            generate-sk-gpu all --recompute-if-missing
+            --setup-path=data/keys
+            --path={link_to_prover}/data/keys"
+        ));
+        cmd.run()?;
+        spinner.finish();
+        logger::outro(MSG_SK_GENERATED);
+    } else {
+        check_prerequisites(shell, &GCLOUD_PREREQUISITE, false);
+
+        let link_to_setup_keys = get_link_to_prover(&ecosystem_config).join("data/keys");
+        let path_to_keys_buckets =
+            get_link_to_prover(&ecosystem_config).join("setup-data-gpu-keys.json");
+
+        let region = args.region.expect("Region is not provided");
+
+        let file = shell
+            .read_file(path_to_keys_buckets)
+            .expect("Could not find commitments file in zksync-era");
+        let json: serde_json::Value =
+            serde_json::from_str(&file).expect("Could not parse commitments.json");
+
+        let bucket = &match region {
+            Region::Us => json
+                .get("us")
+                .expect("Could not find link to US bucket")
bucket") + .to_string(), + Region::Europe => json + .get("europe") + .expect("Could not find link to Europe bucket") + .to_string(), + Region::Asia => json + .get("asia") + .expect("Could not find link to Asia bucket") + .to_string(), + }; + + let len = bucket.len() - 2usize; + let bucket = &bucket[1..len]; + + let spinner = Spinner::new(&format!( + "Downloading keys from bucket: {} to {:?}", + bucket, link_to_setup_keys + )); + + let cmd = Cmd::new(cmd!( + shell, + "gsutil -m rsync -r {bucket} {link_to_setup_keys}" + )); + cmd.run()?; + spinner.finish(); + logger::outro("Keys are downloaded"); + } + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs deleted file mode 100644 index 4dae70863dc9..000000000000 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs +++ /dev/null @@ -1,10 +0,0 @@ -use std::path::PathBuf; - -use config::EcosystemConfig; - -pub(crate) fn get_link_to_prover(config: &EcosystemConfig) -> PathBuf { - let link_to_code = config.link_to_code.clone(); - let mut link_to_prover = link_to_code.into_os_string(); - link_to_prover.push("/prover"); - link_to_prover.into() -} diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zk_toolbox/crates/zk_inception/src/commands/server.rs index b5a09ed04370..f96bc3aeebc9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/server.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/server.rs @@ -35,7 +35,11 @@ fn run_server( chain_config: &ChainConfig, shell: &Shell, ) -> anyhow::Result<()> { - let server = Server::new(args.components.clone(), chain_config.link_to_code.clone()); + let server = Server::new( + args.components.clone(), + chain_config.link_to_code.clone(), + args.uring, + ); if args.build { server.build(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index 7463dc28570e..22e570a5439e 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -1,3 +1,5 @@ +use std::net::{IpAddr, Ipv4Addr}; + pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; @@ -8,5 +10,46 @@ pub const DEFAULT_CREDENTIALS_FILE: &str = "~/.config/gcloud/application_default pub const DEFAULT_PROOF_STORE_DIR: &str = "artifacts"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; + +#[allow(non_upper_case_globals)] +const kB: usize = 1024; + +/// Max payload size for consensus in bytes +pub const MAX_PAYLOAD_SIZE: usize = 2_500_000; +/// Max batch size for consensus in bytes +/// Compute a default batch size, so operators are not caught out by the missing setting +/// while we're still working on batch syncing. The batch interval is ~1 minute, +/// so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high +/// traffic there can be thousands of huge transactions that quickly fill up blocks +/// and there could be more blocks in a batch then expected. We chose a generous +/// limit so as not to prevent any legitimate batch from being transmitted. 
+pub const MAX_BATCH_SIZE: usize = MAX_PAYLOAD_SIZE * 5000 + kB; +/// Gossip dynamic inbound limit for consensus +pub const GOSSIP_DYNAMIC_INBOUND_LIMIT: usize = 100; + +/// Public address for consensus +pub const CONSENSUS_PUBLIC_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); +/// Server address for consensus +pub const CONSENSUS_SERVER_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); + +/// Path to the JS runtime config for the block-explorer-app docker container to be mounted to +pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; +pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; +/// Path to the JS runtime config for the dapp-portal docker container to be mounted to +pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js"; pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal"; -pub const PORTAL_DOCKER_CONTAINER_PORT: u16 = 3000; + +pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway:latest2.0"; +pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:latest2.0"; +pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = + "matterlabs/witness-vector-generator:latest2.0"; +pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; +pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; +pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; + +pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; +pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; +pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_generator"; +pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; +pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; +pub const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor"; diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zk_toolbox/crates/zk_inception/src/defaults.rs index 34b0eeae4195..6c3821eed856 100644 --- a/zk_toolbox/crates/zk_inception/src/defaults.rs +++ b/zk_toolbox/crates/zk_inception/src/defaults.rs @@ -7,11 +7,14 @@ lazy_static! 
{
         Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap();
     pub static ref DATABASE_PROVER_URL: Url =
         Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap();
+    pub static ref DATABASE_EXPLORER_URL: Url =
+        Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap();
 }
 
 pub const ROCKS_DB_STATE_KEEPER: &str = "state_keeper";
 pub const ROCKS_DB_TREE: &str = "tree";
 pub const ROCKS_DB_PROTECTIVE_READS: &str = "protective_reads";
+pub const ROCKS_DB_BASIC_WITNESS_INPUT_PRODUCER: &str = "basic_witness_input_producer";
 pub const EN_ROCKS_DB_PREFIX: &str = "en";
 pub const MAIN_ROCKS_DB_PREFIX: &str = "main";
@@ -40,6 +43,14 @@ pub fn generate_db_names(config: &ChainConfig) -> DBNames {
     }
 }
 
+pub fn generate_explorer_db_name(config: &ChainConfig) -> String {
+    format!(
+        "zksync_explorer_{}_{}",
+        config.l1_network.to_string().to_ascii_lowercase(),
+        config.name
+    )
+}
+
 pub fn generate_external_node_db_name(config: &ChainConfig) -> String {
     format!(
         "external_node_{}_{}",
diff --git a/zk_toolbox/crates/zk_inception/src/external_node.rs b/zk_toolbox/crates/zk_inception/src/external_node.rs
index 0770fa8b14cd..5ff4ce070250 100644
--- a/zk_toolbox/crates/zk_inception/src/external_node.rs
+++ b/zk_toolbox/crates/zk_inception/src/external_node.rs
@@ -1,12 +1,12 @@
 use std::path::PathBuf;
 
 use anyhow::Context;
-use common::cmd::Cmd;
 use config::{
     external_node::ENConfig, traits::FileConfigWithDefaultName, ChainConfig, GeneralConfig,
     SecretsConfig,
 };
-use xshell::{cmd, Shell};
+use xshell::Shell;
+use zksync_config::configs::consensus::ConsensusConfig;
 
 use crate::messages::MSG_FAILED_TO_RUN_SERVER_ERR;
@@ -16,6 +16,7 @@ pub struct RunExternalNode {
     general_config: PathBuf,
     secrets: PathBuf,
     en_config: PathBuf,
+    consensus_config: PathBuf,
 }
 
 impl RunExternalNode {
@@ -30,6 +31,7 @@ impl RunExternalNode {
         let general_config = GeneralConfig::get_path_with_base_path(&en_path);
         let secrets = SecretsConfig::get_path_with_base_path(&en_path);
         let enconfig = ENConfig::get_path_with_base_path(&en_path);
+        let consensus_config = ConsensusConfig::get_path_with_base_path(&en_path);
 
         Ok(Self {
             components,
@@ -37,33 +39,40 @@ impl RunExternalNode {
             general_config,
             secrets,
             en_config: enconfig,
+            consensus_config,
         })
     }
 
-    pub fn run(&self, shell: &Shell, mut additional_args: Vec<String>) -> anyhow::Result<()> {
-        shell.change_dir(&self.code_path);
+    pub fn run(
+        &self,
+        shell: &Shell,
+        enable_consensus: bool,
+        mut additional_args: Vec<String>,
+    ) -> anyhow::Result<()> {
+        let code_path = self.code_path.to_str().unwrap();
         let config_general_config = &self.general_config.to_str().unwrap();
         let en_config = &self.en_config.to_str().unwrap();
         let secrets = &self.secrets.to_str().unwrap();
+        let consensus_config = &self.consensus_config.to_str().unwrap();
         if let Some(components) = self.components() {
             additional_args.push(format!("--components={}", components))
         }
-        let cmd = Cmd::new(
-            cmd!(
-                shell,
-                "cargo run --release --bin zksync_external_node --
-                --config-path {config_general_config}
-                --secrets-path {secrets}
-                --external-node-config-path {en_config}
-                "
-            )
-            .args(additional_args)
-            .env_remove("RUSTUP_TOOLCHAIN"),
-        )
-        .with_force_run();
+        let mut consensus_args = vec![];
+        if enable_consensus {
+            consensus_args.push("--enable-consensus".to_string());
+            consensus_args.push(format!("--consensus-path={}", consensus_config))
+        }
 
-        cmd.run().context(MSG_FAILED_TO_RUN_SERVER_ERR)?;
-        Ok(())
+        common::external_node::run(
+            shell,
+            code_path,
+            config_general_config,
+            secrets,
+            en_config,
+            consensus_args,
+            additional_args,
+        )
+        .context(MSG_FAILED_TO_RUN_SERVER_ERR)
     }
 
     fn components(&self) -> Option<String> {
diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs
index 77c6f5322bfb..bc4ed2ffa5a9 100644
--- a/zk_toolbox/crates/zk_inception/src/main.rs
+++ b/zk_toolbox/crates/zk_inception/src/main.rs
@@ -13,9 +13,10 @@ use config::EcosystemConfig;
 use xshell::Shell;
 
 use crate::commands::{
-    args::{PortalArgs, RunServerArgs},
+    args::RunServerArgs,
     chain::ChainCommands,
     ecosystem::EcosystemCommands,
+    explorer::ExplorerCommands,
     external_node::ExternalNodeCommands,
     prover::ProverCommands,
     database::DatabaseArgs,
@@ -61,8 +62,11 @@ pub enum InceptionSubcommands {
     #[command(subcommand)]
     ContractVerifier(ContractVerifierCommands),
     /// Run dapp-portal
-    Portal(PortalArgs),
-    /// Update zkSync
+    Portal,
+    /// Run block-explorer
+    #[command(subcommand)]
+    Explorer(ExplorerCommands),
+    /// Update ZKsync
     #[command(alias = "u")]
     Update(UpdateArgs),
     /// Standalone database initializer
@@ -127,7 +131,8 @@ async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Res
         InceptionSubcommands::ContractVerifier(args) => {
             commands::contract_verifier::run(shell, args).await?
         }
-        InceptionSubcommands::Portal(args) => commands::portal::run(shell, args).await?,
+        InceptionSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?,
+        InceptionSubcommands::Portal => commands::portal::run(shell).await?,
         InceptionSubcommands::Update(args) => commands::update::run(shell, args)?,
         InceptionSubcommands::Database(args) => commands::database::run(shell, args).await?,
         InceptionSubcommands::Markdown => {
diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs
index cfd6ec97c560..a01a3052d9da 100644
--- a/zk_toolbox/crates/zk_inception/src/messages.rs
+++ b/zk_toolbox/crates/zk_inception/src/messages.rs
@@ -5,6 +5,10 @@ use ethers::{
     utils::format_ether,
 };
 
+pub(super) const MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT: &str =
+    "Do you want to download the setup keys or generate them?";
+pub(super) const MSG_SETUP_KEYS_REGION_PROMPT: &str =
+    "From which region do you want the setup keys to be downloaded?";
 /// Common messages
 pub(super) const MSG_SELECTED_CONFIG: &str = "Selected config";
 pub(super) const MSG_CHAIN_NOT_INITIALIZED: &str =
@@ -38,15 +42,16 @@ pub(super) const MSG_ECOSYSTEM_CONFIG_INVALID_ERR: &str = "Invalid ecosystem con
 pub(super) const MSG_LINK_TO_CODE_SELECTION_CLONE: &str = "Clone for me (recommended)";
 pub(super) const MSG_LINK_TO_CODE_SELECTION_PATH: &str = "I have the code already";
 pub(super) const MSG_NOT_MAIN_REPO_OR_FORK_ERR: &str =
-    "It's not a zkSync Era main repository or fork";
+    "It's not a ZKsync Era main repository or fork";
 pub(super) const MSG_CONFIRM_STILL_USE_FOLDER: &str = "Do you still want to use this folder?";
 
 pub(super) fn msg_path_to_zksync_does_not_exist_err(path: &str) -> String {
-    format!("Path to zkSync Era repo does not exist: {path:?}")
+    format!("Path to ZKsync Era repo does not exist: {path:?}")
 }
 
 /// Ecosystem and chain init related messages
 pub(super) const MSG_L1_RPC_URL_HELP: &str = "L1 RPC URL";
+pub(super) const MSG_PORT_OFFSET_HELP: &str = "Add a constant offset to the ports exposed by the components. Useful when running multiple chains on the same machine";
 pub(super) const MSG_GENESIS_ARGS_HELP: &str = "Genesis options";
 pub(super) const MSG_DEV_ARG_HELP: &str = "Deploy ecosystem using all defaults. Suitable for local development";
@@ -58,7 +63,7 @@ pub(super) const MSG_DEPLOY_ECOSYSTEM_PROMPT: &str =
 pub(super) const MSG_L1_RPC_URL_PROMPT: &str = "What is the RPC URL of the L1 network?";
 pub(super) const MSG_DEPLOY_PAYMASTER_PROMPT: &str = "Do you want to deploy Paymaster contract?";
 pub(super) const MSG_DEPLOY_ERC20_PROMPT: &str = "Do you want to deploy some test ERC20s?";
-pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT: &str = "Provide the path to the ecosystem contracts or keep it empty and you will use ZkSync ecosystem config. \
+pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT: &str = "Provide the path to the ecosystem contracts or keep it empty and you will use ZKsync ecosystem config. \
 For using this config, you need to have governance wallet";
 pub(super) const MSG_L1_RPC_URL_INVALID_ERR: &str = "Invalid RPC URL";
 pub(super) const MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR: &str = "Invalid path";
@@ -113,6 +118,9 @@ pub(super) fn msg_chain_doesnt_exist_err(chain_name: &str, chains: &Vec<String>)
         chain_name, chains
     )
 }
+pub(super) fn msg_chain_load_err(chain_name: &str) -> String {
+    format!("Failed to load chain config for {chain_name}")
+}
 
 /// Chain create related messages
 pub(super) const MSG_PROVER_MODE_HELP: &str = "Prover options";
@@ -150,6 +158,8 @@ pub(super) const MSG_CHAIN_ID_VALIDATOR_ERR: &str = "Invalid chain id";
 pub(super) const MSG_BASE_TOKEN_ADDRESS_VALIDATOR_ERR: &str = "Invalid base token address";
 pub(super) const MSG_WALLET_CREATION_VALIDATOR_ERR: &str =
     "Localhost wallet is not supported for external networks";
+pub(super) const MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND: &str =
+    "Token Multiplier Setter not found. Specify it in a wallet config";
 
 /// Chain genesis related messages
 pub(super) const MSG_L1_SECRETS_MUST_BE_PRESENTED: &str = "L1 secret must be presented";
@@ -195,6 +205,14 @@ pub(super) fn msg_server_db_name_prompt(chain_name: &str) -> String {
     format!("Please provide server database name for chain {chain_name}")
 }
 
+pub(super) fn msg_explorer_db_url_prompt(chain_name: &str) -> String {
+    format!("Please provide explorer database URL for chain {chain_name}")
+}
+
+pub(super) fn msg_explorer_db_name_prompt(chain_name: &str) -> String {
+    format!("Please provide explorer database name for chain {chain_name}")
+}
+
 /// Chain initialize bridges related messages
 pub(super) const MSG_DEPLOYING_L2_CONTRACT_SPINNER: &str = "Deploying l2 contracts";
@@ -203,10 +221,12 @@ pub(super) const MSG_DEPLOYING_PAYMASTER: &str = "Deploying paymaster";
 
 /// Run server related messages
 pub(super) const MSG_SERVER_COMPONENTS_HELP: &str = "Components of server to run";
+pub(super) const MSG_ENABLE_CONSENSUS_HELP: &str = "Enable consensus";
 pub(super) const MSG_SERVER_GENESIS_HELP: &str = "Run server in genesis mode";
 pub(super) const MSG_SERVER_ADDITIONAL_ARGS_HELP: &str =
     "Additional arguments that can be passed through the CLI";
 pub(super) const MSG_SERVER_BUILD_HELP: &str = "Build server but don't run it";
+pub(super) const MSG_SERVER_URING_HELP: &str = "Enables uring support for RocksDB";
 
 /// Accept ownership related messages
 pub(super) const MSG_ACCEPTING_GOVERNANCE_SPINNER: &str = "Accepting governance...";
@@ -227,14 +247,46 @@ pub(super) const MSG_FAILED_TO_RUN_SERVER_ERR: &str = "Failed to start server";
 pub(super) const MSG_PREPARING_EN_CONFIGS: &str = "Preparing External Node config";
 
 /// Portal related messages
-pub(super) const MSG_PORTAL_CONFIG_IS_EMPTY_ERR: &str = "Hyperchains config is empty";
+pub(super) const
MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR: &str = + "Failed to find any valid chain to run portal for"; pub(super) const MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR: &str = "Failed to create portal config"; pub(super) const MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR: &str = "Failed to run portal docker container"; +pub(super) fn msg_portal_running_with_config(path: &Path) -> String { + format!("Running portal with configuration from: {}", path.display()) +} pub(super) fn msg_portal_starting_on(host: &str, port: u16) -> String { format!("Starting portal on http://{host}:{port}") } +/// Explorer related messages +pub(super) const MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR: &str = + "Failed to drop explorer database"; +pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_SERVICES_ERR: &str = + "Failed to run docker compose with explorer services"; +pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR: &str = + "Failed to run explorer docker container"; +pub(super) const MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR: &str = + "Failed to create explorer config"; +pub(super) const MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR: &str = + "Failed to find any valid chain to run explorer for. Did you run `zk_inception explorer init`?"; +pub(super) const MSG_EXPLORER_INITIALIZED: &str = "Explorer has been initialized successfully"; +pub(super) fn msg_explorer_initializing_database_for(chain: &str) -> String { + format!("Initializing explorer database for {chain} chain") +} +pub(super) fn msg_explorer_running_with_config(path: &Path) -> String { + format!( + "Running explorer with configuration from: {}", + path.display() + ) +} +pub(super) fn msg_explorer_starting_on(host: &str, port: u16) -> String { + format!("Starting explorer on http://{host}:{port}") +} +pub(super) fn msg_explorer_chain_not_initialized(chain: &str) -> String { + format!("Chain {chain} is not initialized for explorer: run `zk_inception explorer init --chain {chain}` first") +} + /// Forge utils related messages pub(super) const MSG_DEPLOYER_PK_NOT_SET_ERR: &str = "Deployer private key is not set"; @@ -256,6 +308,11 @@ pub(super) fn msg_preparing_en_config_is_done(path: &Path) -> String { pub(super) const MSG_EXTERNAL_NODE_CONFIG_NOT_INITIALIZED: &str = "External node is not initialized"; +pub(super) const MSG_CONSENSUS_CONFIG_MISSING_ERR: &str = "Consensus config is missing"; +pub(super) const MSG_CONSENSUS_SECRETS_MISSING_ERR: &str = "Consensus secrets config is missing"; +pub(super) const MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR: &str = "Consensus node key is missing"; + +pub(super) const MSG_PORTS_CONFIG_ERR: &str = "Failed to get ports config"; pub(super) const MSG_STARTING_EN: &str = "Starting external node"; @@ -264,6 +321,7 @@ pub(super) const MSG_GENERATING_SK_SPINNER: &str = "Generating setup keys..."; pub(super) const MSG_SK_GENERATED: &str = "Setup keys generated successfully"; pub(super) const MSG_MISSING_COMPONENT_ERR: &str = "Missing component"; pub(super) const MSG_RUNNING_PROVER_GATEWAY: &str = "Running gateway"; +pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR_ERR: &str = "Failed to run prover job monitor"; pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job monitor"; pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; @@ -297,9 +355,13 @@ pub(super) const MSG_CREATE_GCS_BUCKET_NAME_PROMTP: &str = "What do you want to pub(super) const MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT: &str = "What 
location do you want to use? Find available locations at https://cloud.google.com/storage/docs/locations"; pub(super) const MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR: &str = "Proof compressor config not found"; -pub(super) const MSG_DOWNLOADING_SETUP_KEY_SPINNER: &str = "Downloading setup key..."; -pub(super) const MSG_DOWNLOAD_SETUP_KEY_PROMPT: &str = "Do you want to download the setup key?"; -pub(super) const MSG_SETUP_KEY_PATH_PROMPT: &str = "Provide the path to the setup key:"; +pub(super) const MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER: &str = + "Downloading compressor setup key..."; +pub(super) const MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT: &str = + "Do you want to download the setup key for compressor?"; +pub(super) const MSG_INITIALIZE_BELLMAN_CUDA_PROMPT: &str = + "Do you want to initialize bellman-cuda?"; +pub(super) const MSG_SETUP_COMPRESSOR_KEY_PATH_PROMPT: &str = "Provide the path to the setup key:"; pub(super) const MSG_GETTING_GCP_PROJECTS_SPINNER: &str = "Getting GCP projects..."; pub(super) const MSG_GETTING_PROOF_STORE_CONFIG: &str = "Getting proof store configuration..."; pub(super) const MSG_GETTING_PUBLIC_STORE_CONFIG: &str = "Getting public store configuration..."; @@ -321,6 +383,7 @@ pub(super) const MSG_BELLMAN_CUDA_SELECTION_CLONE: &str = "Clone for me (recomme pub(super) const MSG_BELLMAN_CUDA_SELECTION_PATH: &str = "I have the code already"; pub(super) const MSG_CLOUD_TYPE_PROMPT: &str = "Select the cloud connection mode:"; pub(super) const MSG_THREADS_PROMPT: &str = "Provide the number of threads:"; +pub(super) const MSG_SETUP_KEYS_PROMPT: &str = "Do you want to setup keys?"; pub(super) fn msg_bucket_created(bucket_name: &str) -> String { format!("Bucket created successfully with url: gs://{bucket_name}") @@ -361,8 +424,8 @@ pub(super) fn msg_downloading_binary_spinner(name: &str, version: &str) -> Strin /// Update related messages pub(super) const MSG_UPDATE_ONLY_CONFIG_HELP: &str = "Update only the config files"; -pub(super) const MSG_UPDATING_ZKSYNC: &str = "Updating ZkSync"; -pub(super) const MSG_ZKSYNC_UPDATED: &str = "ZkSync updated successfully"; +pub(super) const MSG_UPDATING_ZKSYNC: &str = "Updating ZKsync"; +pub(super) const MSG_ZKSYNC_UPDATED: &str = "ZKsync updated successfully"; pub(super) const MSG_PULLING_ZKSYNC_CODE_SPINNER: &str = "Pulling zksync-era repo..."; pub(super) const MSG_UPDATING_SUBMODULES_SPINNER: &str = "Updating submodules..."; pub(super) const MSG_DIFF_GENERAL_CONFIG: &str = diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs new file mode 100644 index 000000000000..06848334a6e1 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs @@ -0,0 +1,124 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + net::SocketAddr, +}; + +use config::{ChainConfig, PortsConfig}; +use secrecy::{ExposeSecret, Secret}; +use zksync_config::configs::consensus::{ + AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host, + NodePublicKey, NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, + WeightedAttester, WeightedValidator, +}; +use zksync_consensus_crypto::{Text, TextFmt}; +use zksync_consensus_roles as roles; + +use crate::consts::{ + CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT, + MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE, +}; + +#[derive(Debug, Clone)] +pub struct ConsensusSecretKeys { + validator_key: roles::validator::SecretKey, + attester_key: 
roles::attester::SecretKey,
+    node_key: roles::node::SecretKey,
+}
+
+pub struct ConsensusPublicKeys {
+    validator_key: roles::validator::PublicKey,
+    attester_key: roles::attester::PublicKey,
+}
+
+pub fn get_consensus_config(
+    chain_config: &ChainConfig,
+    ports: PortsConfig,
+    consensus_keys: Option<ConsensusSecretKeys>,
+    gossip_static_outbound: Option<BTreeMap<NodePublicKey, Host>>,
+) -> anyhow::Result<ConsensusConfig> {
+    let genesis_spec =
+        consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys));
+
+    let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, ports.consensus_port);
+    let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, ports.consensus_port);
+
+    Ok(ConsensusConfig {
+        server_addr,
+        public_addr: Host(public_addr.encode()),
+        genesis_spec,
+        max_payload_size: MAX_PAYLOAD_SIZE,
+        gossip_dynamic_inbound_limit: GOSSIP_DYNAMIC_INBOUND_LIMIT,
+        max_batch_size: MAX_BATCH_SIZE,
+        gossip_static_inbound: BTreeSet::new(),
+        gossip_static_outbound: gossip_static_outbound.unwrap_or_default(),
+        rpc: None,
+    })
+}
+
+pub fn generate_consensus_keys() -> ConsensusSecretKeys {
+    ConsensusSecretKeys {
+        validator_key: roles::validator::SecretKey::generate(),
+        attester_key: roles::attester::SecretKey::generate(),
+        node_key: roles::node::SecretKey::generate(),
+    }
+}
+
+fn get_consensus_public_keys(consensus_keys: &ConsensusSecretKeys) -> ConsensusPublicKeys {
+    ConsensusPublicKeys {
+        validator_key: consensus_keys.validator_key.public(),
+        attester_key: consensus_keys.attester_key.public(),
+    }
+}
+
+pub fn get_genesis_specs(
+    chain_config: &ChainConfig,
+    consensus_keys: &ConsensusSecretKeys,
+) -> GenesisSpec {
+    let public_keys = get_consensus_public_keys(consensus_keys);
+    let validator_key = public_keys.validator_key.encode();
+    let attester_key = public_keys.attester_key.encode();
+
+    let validator = WeightedValidator {
+        key: ValidatorPublicKey(validator_key.clone()),
+        weight: 1,
+    };
+    let attester = WeightedAttester {
+        key: AttesterPublicKey(attester_key),
+        weight: 1,
+    };
+    let leader = ValidatorPublicKey(validator_key);
+
+    GenesisSpec {
+        chain_id: chain_config.chain_id,
+        protocol_version: ProtocolVersion(1),
+        validators: vec![validator],
+        attesters: vec![attester],
+        leader,
+        registry_address: None,
+    }
+}
+
+pub fn get_consensus_secrets(consensus_keys: &ConsensusSecretKeys) -> ConsensusSecrets {
+    let validator_key = consensus_keys.validator_key.encode();
+    let attester_key = consensus_keys.attester_key.encode();
+    let node_key = consensus_keys.node_key.encode();
+
+    ConsensusSecrets {
+        validator_key: Some(ValidatorSecretKey(Secret::new(validator_key))),
+        attester_key: Some(AttesterSecretKey(Secret::new(attester_key))),
+        node_key: Some(NodeSecretKey(Secret::new(node_key))),
+    }
+}
+
+pub fn node_public_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<NodePublicKey>> {
+    Ok(node_key(secrets)?.map(|node_secret_key| NodePublicKey(node_secret_key.public().encode())))
+}
+fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<roles::node::SecretKey>> {
+    read_secret_text(secrets.node_key.as_ref().map(|x| &x.0))
+}
+
+fn read_secret_text<T: TextFmt>(text: Option<&Secret<String>>) -> anyhow::Result<Option<T>> {
+    text.map(|text| Text::new(text.expose_secret()).decode())
+        .transpose()
+        .map_err(|_| anyhow::format_err!("invalid format"))
+}
diff --git a/zk_toolbox/crates/zk_inception/src/utils/mod.rs b/zk_toolbox/crates/zk_inception/src/utils/mod.rs
index a84f0a336de5..229d3908dc3a 100644
--- a/zk_toolbox/crates/zk_inception/src/utils/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/utils/mod.rs
@@ -1,2 +1,3 @@
+pub mod consensus;
 pub mod forge;
pub mod rocks_db; diff --git a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs b/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs index 17cffa66e39d..1b7e29dd9722 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs @@ -4,8 +4,8 @@ use config::RocksDbs; use xshell::Shell; use crate::defaults::{ - EN_ROCKS_DB_PREFIX, MAIN_ROCKS_DB_PREFIX, ROCKS_DB_PROTECTIVE_READS, ROCKS_DB_STATE_KEEPER, - ROCKS_DB_TREE, + EN_ROCKS_DB_PREFIX, MAIN_ROCKS_DB_PREFIX, ROCKS_DB_BASIC_WITNESS_INPUT_PRODUCER, + ROCKS_DB_PROTECTIVE_READS, ROCKS_DB_STATE_KEEPER, ROCKS_DB_TREE, }; pub enum RocksDBDirOption { @@ -37,9 +37,13 @@ pub fn recreate_rocksdb_dirs( .join(option.prefix()) .join(ROCKS_DB_PROTECTIVE_READS); shell.remove_path(&protective_reads)?; + let basic_witness_input_producer = rocks_db_path + .join(option.prefix()) + .join(ROCKS_DB_BASIC_WITNESS_INPUT_PRODUCER); Ok(RocksDbs { state_keeper: shell.create_dir(state_keeper)?, merkle_tree: shell.create_dir(merkle_tree)?, protective_reads: shell.create_dir(protective_reads)?, + basic_witness_input_producer: shell.create_dir(basic_witness_input_producer)?, }) } diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml index e24c88f3ec25..d9c5c2196fae 100644 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml @@ -15,12 +15,15 @@ anyhow.workspace = true clap.workspace = true common.workspace = true config.workspace = true +ethers.workspace = true human-panic.workspace = true strum.workspace = true tokio.workspace = true url.workspace = true xshell.workspace = true serde.workspace = true +serde_json.workspace = true clap-markdown.workspace = true futures.workspace = true -serde_json.workspace = true +types.workspace = true +serde_yaml.workspace = true diff --git a/zk_toolbox/crates/zk_supervisor/README.md b/zk_toolbox/crates/zk_supervisor/README.md index 1f880cdcb30a..865bd2f0d579 100644 --- a/zk_toolbox/crates/zk_supervisor/README.md +++ b/zk_toolbox/crates/zk_supervisor/README.md @@ -5,7 +5,6 @@ This document contains the help content for the `zk_supervisor` command-line pro **Command Overview:** - [`zk_supervisor`↴](#zk_supervisor) -- [`zk_supervisor prover-version`↴](#zk_supervisor-prover-version) - [`zk_supervisor database`↴](#zk_supervisor-database) - [`zk_supervisor database check-sqlx-data`↴](#zk_supervisor-database-check-sqlx-data) - [`zk_supervisor database drop`↴](#zk_supervisor-database-drop) @@ -19,12 +18,23 @@ This document contains the help content for the `zk_supervisor` command-line pro - [`zk_supervisor test revert`↴](#zk_supervisor-test-revert) - [`zk_supervisor test recovery`↴](#zk_supervisor-test-recovery) - [`zk_supervisor test upgrade`↴](#zk_supervisor-test-upgrade) +- [`zk_supervisor test rust`↴](#zk_supervisor-test-rust) +- [`zk_supervisor test l1-contracts`↴](#zk_supervisor-test-l1-contracts) +- [`zk_supervisor test prover`↴](#zk_supervisor-test-prover) - [`zk_supervisor clean`↴](#zk_supervisor-clean) - [`zk_supervisor clean all`↴](#zk_supervisor-clean-all) - [`zk_supervisor clean containers`↴](#zk_supervisor-clean-containers) - [`zk_supervisor clean contracts-cache`↴](#zk_supervisor-clean-contracts-cache) - [`zk_supervisor snapshot`↴](#zk_supervisor-snapshot) - [`zk_supervisor snapshot create`↴](#zk_supervisor-snapshot-create) +- [`zk_supervisor lint`↴](#zk_supervisor-lint) +- [`zk_supervisor fmt`↴](#zk_supervisor-fmt) +- [`zk_supervisor fmt 
rustfmt`↴](#zk_supervisor-fmt-rustfmt)
+- [`zk_supervisor fmt contract`↴](#zk_supervisor-fmt-contract)
+- [`zk_supervisor fmt prettier`↴](#zk_supervisor-fmt-prettier)
+- [`zk_supervisor prover info`↴](#zk_supervisor-prover-info)
+- [`zk_supervisor prover insert-version`↴](#zk_supervisor-prover-insert-version)
+- [`zk_supervisor prover insert-batch`↴](#zk_supervisor-prover-insert-batch)
 
 ## `zk_supervisor`
 
@@ -38,6 +48,9 @@ ZK Toolbox is a set of tools for working with zk stack.
 - `test` — Run tests
 - `clean` — Clean artifacts
 - `snapshot` — Snapshots creator
+- `lint` — Lint code
+- `fmt` — Format code
+- `prover` — Prover related commands
 
 ###### **Options:**
 
@@ -45,12 +58,6 @@ ZK Toolbox is a set of tools for working with zk stack.
 - `--chain <CHAIN>` — Chain to use
 - `--ignore-prerequisites` — Ignores prerequisites checks
 
-## `zk_supervisor prover-version`
-
-Gets information about current protocol version of provers in `zksync-era` and snark wrapper hash.
-
-**Usage:** `zk_supervisor prover-version`
-
 ## `zk_supervisor database`
 
 Database related commands
@@ -189,6 +196,9 @@ Run tests
 - `revert` — Run revert tests
 - `recovery` — Run recovery tests
 - `upgrade` — Run upgrade tests
+- `rust` — Run unit-tests, accepts optional cargo test flags
+- `l1-contracts` — Run L1 contracts tests
+- `prover` — Run prover tests
 
 ## `zk_supervisor test integration`
 
@@ -227,6 +237,28 @@ Run upgrade tests
 
 **Usage:** `zk_supervisor test upgrade`
 
+## `zk_supervisor test rust`
+
+Run unit-tests, accepts optional cargo test flags
+
+**Usage:** `zk_supervisor test rust [OPTIONS]`
+
+###### **Options:**
+
+- `--options <OPTIONS>` — Cargo test flags
+
+## `zk_supervisor test l1-contracts`
+
+Run L1 contracts tests
+
+**Usage:** `zk_supervisor test l1-contracts`
+
+## `zk_supervisor test prover`
+
+Run prover tests
+
+**Usage:** `zk_supervisor test prover`
+
 ## `zk_supervisor clean`
 
 Clean artifacts
@@ -271,6 +303,83 @@ Snapshots creator
 
 **Usage:** `zk_supervisor snapshot create`
 
+## `zk_supervisor lint`
+
+Lint code
+
+**Usage:** `zk_supervisor lint [OPTIONS]`
+
+###### **Options:**
+
+- `-c`, `--check`
+- `-t`, `--targets <TARGETS>`
+
+  Possible values: `md`, `sol`, `js`, `ts`, `rs`, `contracts`
+
+## `zk_supervisor fmt`
+
+Format code
+
+**Usage:** `zk_supervisor fmt [OPTIONS] [COMMAND]`
+
+###### **Subcommands:**
+
+- `rustfmt` —
+- `contract` —
+- `prettier` —
+
+###### **Options:**
+
+- `-c`, `--check`
+
+## `zk_supervisor fmt rustfmt`
+
+**Usage:** `zk_supervisor fmt rustfmt`
+
+## `zk_supervisor fmt contract`
+
+**Usage:** `zk_supervisor fmt contract`
+
+## `zk_supervisor fmt prettier`
+
+**Usage:** `zk_supervisor fmt prettier [OPTIONS]`
+
+###### **Options:**
+
+- `-t`, `--targets <TARGETS>`
+
+  Possible values: `md`, `sol`, `js`, `ts`, `rs`, `contracts`
+
+## `zk_supervisor prover info`
+
+Prints prover protocol version, snark wrapper and prover database URL
+
+**Usage:** `zk_supervisor prover info`
+
+## `zk_supervisor prover insert-version`
+
+Inserts protocol version into prover database.
+
+**Usage:** `zk_supervisor prover insert-version [OPTIONS]`
+
+###### **Options:**
+
+- `--version <VERSION>` — Protocol version in semantic format (`x.y.z`). Major version should be 0.
+- `--snark-wrapper <SNARK_WRAPPER>` — Snark wrapper hash.
+- `--default` — Use default values for protocol version and snark wrapper hash (the ones found in zksync-era).
+
+## `zk_supervisor prover insert-batch`
+
+Inserts batch into prover database.
+
+**Usage:** `zk_supervisor prover insert-batch`
+
+###### **Options:**
+
+- `--number <NUMBER>` — Number of the batch to insert.
+- `--version <VERSION>` — Protocol version in semantic format (`x.y.z`). Major version should be 0.
+- `--default` — Use default value for protocol version (the one found in zksync-era).
+
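+For example, to insert batch `42` using the default protocol version found in the local zksync-era checkout:
+
+`zk_supervisor prover insert-batch --number 42 --default`
+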
This document was generated automatically by
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs
new file mode 100644
index 000000000000..bab4205cd66f
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs
@@ -0,0 +1,147 @@
+use std::path::PathBuf;
+
+use clap::Parser;
+use common::{cmd::Cmd, logger, spinner::Spinner};
+use config::EcosystemConfig;
+use xshell::{cmd, Shell};
+
+use crate::messages::{
+    MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER,
+    MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER,
+    MSG_BUILDING_TEST_CONTRACTS_SPINNER, MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP,
+    MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_CONTRACTS_DEPS_SPINNER,
+    MSG_NOTHING_TO_BUILD_MSG,
+};
+
+#[derive(Debug, Parser)]
+pub struct ContractsArgs {
+    #[clap(long, alias = "l1", help = MSG_BUILD_L1_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)]
+    pub l1_contracts: Option<bool>,
+    #[clap(long, alias = "l2", help = MSG_BUILD_L2_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)]
+    pub l2_contracts: Option<bool>,
+    #[clap(long, alias = "sc", help = MSG_BUILD_SYSTEM_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)]
+    pub system_contracts: Option<bool>,
+    #[clap(long, alias = "test", help = MSG_BUILD_TEST_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)]
+    pub test_contracts: Option<bool>,
+}
+
+impl ContractsArgs {
+    fn contracts(&self) -> Vec<ContractType> {
+        if self.l1_contracts.is_none()
+            && self.l2_contracts.is_none()
+            && self.system_contracts.is_none()
+            && self.test_contracts.is_none()
+        {
+            return vec![
+                ContractType::L1,
+                ContractType::L2,
+                ContractType::SystemContracts,
+                ContractType::TestContracts,
+            ];
+        }
+
+        let mut contracts = vec![];
+
+        if self.l1_contracts.unwrap_or(false) {
+            contracts.push(ContractType::L1);
+        }
+        if self.l2_contracts.unwrap_or(false) {
+            contracts.push(ContractType::L2);
+        }
+        if self.system_contracts.unwrap_or(false) {
+            contracts.push(ContractType::SystemContracts);
+        }
+        if self.test_contracts.unwrap_or(false) {
+            contracts.push(ContractType::TestContracts);
+        }
+
+        contracts
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum ContractType {
+    L1,
+    L2,
+    SystemContracts,
+    TestContracts,
+}
+
+#[derive(Debug)]
+struct ContractBuilder {
+    dir: PathBuf,
+    cmd: String,
+    msg: String,
+}
+
+impl ContractBuilder {
+    fn new(ecosystem: &EcosystemConfig, contract_type: ContractType) -> Self {
+        match contract_type {
+            ContractType::L1 => Self {
+                dir: ecosystem.path_to_foundry(),
+                cmd: "forge build".to_string(),
+                msg: MSG_BUILDING_L1_CONTRACTS_SPINNER.to_string(),
+            },
+            ContractType::L2 => Self {
+                dir: ecosystem.link_to_code.clone(),
+                cmd: "yarn l2-contracts build".to_string(),
+                msg: MSG_BUILDING_L2_CONTRACTS_SPINNER.to_string(),
+            },
+            ContractType::SystemContracts => Self {
+                dir: ecosystem.link_to_code.join("contracts"),
+                cmd: "yarn sc build".to_string(),
+                msg: MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER.to_string(),
+            },
+            ContractType::TestContracts => Self {
+                dir: ecosystem.link_to_code.join("etc/contracts-test-data"),
+                cmd: "yarn build".to_string(),
+                msg: MSG_BUILDING_TEST_CONTRACTS_SPINNER.to_string(),
+            },
+        }
+    }
+
+    fn build(&self, shell: &Shell) -> anyhow::Result<()> {
+        let spinner = Spinner::new(&self.msg);
+        let _dir_guard = shell.push_dir(&self.dir);
+
+        let mut args = self.cmd.split_whitespace().collect::<Vec<_>>();
+        let command = args.remove(0); // It's safe to remove the first element here because the command string is never empty
+        let mut cmd = cmd!(shell, "{command}");
+
+        for arg in args {
+            cmd = cmd.arg(arg);
+        }
+
+        Cmd::new(cmd).run()?;
+
+        spinner.finish();
+        Ok(())
+    }
+}
+
+pub fn run(shell: &Shell, args: ContractsArgs) -> anyhow::Result<()> {
+    let contracts = args.contracts();
+    if contracts.is_empty() {
+        logger::outro(MSG_NOTHING_TO_BUILD_MSG);
+        return Ok(());
+    }
+
+    logger::info(MSG_BUILDING_CONTRACTS);
+
+    let ecosystem = EcosystemConfig::from_file(shell)?;
+    let link_to_code = ecosystem.link_to_code.clone();
+
+    let spinner = Spinner::new(MSG_CONTRACTS_DEPS_SPINNER);
+    let _dir_guard = shell.push_dir(&link_to_code);
+    Cmd::new(cmd!(shell, "yarn install")).run()?;
+    spinner.finish();
+
+    contracts
+        .iter()
+        .map(|contract| ContractBuilder::new(&ecosystem, *contract))
+        .try_for_each(|builder| builder.build(shell))?;
+
+    logger::outro(MSG_BUILDING_CONTRACTS_SUCCESS);
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs
index 1541e7f518d8..cf9dfc2834a8 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs
@@ -2,17 +2,28 @@ use clap::Parser;
 
 use crate::{
     dals::SelectedDals,
-    messages::{MSG_DATABASE_COMMON_CORE_HELP, MSG_DATABASE_COMMON_PROVER_HELP},
+    messages::{
+        MSG_DATABASE_COMMON_CORE_HELP, MSG_DATABASE_COMMON_CORE_URL_HELP,
+        MSG_DATABASE_COMMON_PROVER_HELP, MSG_DATABASE_COMMON_PROVER_URL_HELP,
+    },
 };
 
 pub mod new_migration;
 
 #[derive(Debug, Parser)]
 pub struct DatabaseCommonArgs {
-    #[clap(short, long, default_missing_value = "true", num_args = 0..=1, help = MSG_DATABASE_COMMON_PROVER_HELP)]
+    #[clap(
+        short, long, default_missing_value = "true", num_args = 0..=1, help = MSG_DATABASE_COMMON_PROVER_HELP
+    )]
     pub prover: Option<bool>,
-    #[clap(short, long, default_missing_value = "true", num_args = 0..=1, help = MSG_DATABASE_COMMON_CORE_HELP)]
+    #[clap(long, help = MSG_DATABASE_COMMON_PROVER_URL_HELP)]
+    pub prover_url: Option<String>,
+    #[clap(
+        short, long, default_missing_value = "true", num_args = 0..=1, help = MSG_DATABASE_COMMON_CORE_HELP
+    )]
     pub core: Option<bool>,
+    #[clap(long, help = MSG_DATABASE_COMMON_CORE_URL_HELP)]
+    pub core_url: Option<String>,
 }
 
 impl DatabaseCommonArgs {
@@ -23,6 +34,10 @@ impl DatabaseCommonArgs {
                 prover: true,
                 core: true,
             },
+            urls: DalUrls {
+                prover: self.prover_url,
+                core: self.core_url,
+            },
         };
     }
 
@@ -31,11 +46,22 @@ impl DatabaseCommonArgs {
                 prover: self.prover.unwrap_or(false),
                 core: self.core.unwrap_or(false),
             },
+            urls: DalUrls {
+                prover: self.prover_url,
+                core: self.core_url,
+            },
         }
     }
 }
 
+#[derive(Debug, Clone)]
+pub struct DalUrls {
+    pub prover: Option<String>,
+    pub core: Option<String>,
+}
+
 #[derive(Debug)]
 pub struct DatabaseCommonArgsFinal {
     pub selected_dals: SelectedDals,
+    pub urls: DalUrls,
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs
index 6a5bc663dc7f..0c401595690e 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs
@@ -25,7 +25,7 @@ pub fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> {
 
     logger::info(msg_database_info(MSG_DATABASE_CHECK_SQLX_DATA_GERUND));
 
-    let dals = get_dals(shell,
&args.selected_dals)?; + let dals = get_dals(shell, &args.selected_dals, &args.urls)?; for dal in dals { check_sqlx_data(shell, &ecosystem_config.link_to_code, dal)?; } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs index 075f21d3b1a3..94bf325a2c6c 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs @@ -23,7 +23,7 @@ pub async fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> logger::info(msg_database_info(MSG_DATABASE_DROP_GERUND)); - let dals = get_dals(shell, &args.selected_dals)?; + let dals = get_dals(shell, &args.selected_dals, &args.urls)?; for dal in dals { drop_database(dal).await?; } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs index 72bc7d59148e..1d648965c244 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs @@ -23,7 +23,7 @@ pub fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> { logger::info(msg_database_info(MSG_DATABASE_MIGRATE_GERUND)); let ecosystem_config = EcosystemConfig::from_file(shell)?; - let dals = get_dals(shell, &args.selected_dals)?; + let dals = get_dals(shell, &args.selected_dals, &args.urls)?; for dal in dals { migrate_database(shell, &ecosystem_config.link_to_code, dal)?; } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs index e942e6f3f4f8..415b81879f1b 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs @@ -8,7 +8,7 @@ use crate::messages::{ MSG_DATABASE_SETUP_ABOUT, }; -mod args; +pub mod args; mod check_sqlx_data; mod drop; mod migrate; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs index 127e01bdc10f..e21b7cde47ba 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs @@ -14,8 +14,8 @@ pub fn run(shell: &Shell, args: DatabaseNewMigrationArgs) -> anyhow::Result<()> let args = args.fill_values_with_prompt(); let dal = match args.selected_database { - SelectedDatabase::Core => get_core_dal(shell)?, - SelectedDatabase::Prover => get_prover_dal(shell)?, + SelectedDatabase::Core => get_core_dal(shell, None)?, + SelectedDatabase::Prover => get_prover_dal(shell, None)?, }; let ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs index 48f32319ac55..82ec12f94129 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs @@ -24,7 +24,7 @@ pub fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> { logger::info(msg_database_info(MSG_DATABASE_PREPARE_GERUND)); - let dals = get_dals(shell, &args.selected_dals)?; + let dals = get_dals(shell, &args.selected_dals, &args.urls)?; for dal in dals { prepare_sqlx_data(shell, &ecosystem_config.link_to_code, dal)?; } diff --git 
a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs index 88f2069bf3ae..f0262cecb959 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs @@ -20,14 +20,14 @@ pub async fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> return Ok(()); } - let ecoseystem_config = EcosystemConfig::from_file(shell)?; + let ecosystem_config = EcosystemConfig::from_file(shell)?; logger::info(msg_database_info(MSG_DATABASE_RESET_GERUND)); - let dals = get_dals(shell, &args.selected_dals)?; + let dals = get_dals(shell, &args.selected_dals, &args.urls)?; for dal in dals { logger::info(msg_database_loading(MSG_DATABASE_RESET_GERUND, &dal.path)); - reset_database(shell, ecoseystem_config.link_to_code.clone(), dal).await?; + reset_database(shell, ecosystem_config.link_to_code.clone(), dal).await?; } logger::outro(msg_database_success(MSG_DATABASE_RESET_PAST)); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs index d9d37041774b..15b3ac5c1c72 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs @@ -24,7 +24,7 @@ pub fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> { logger::info(msg_database_info(MSG_DATABASE_SETUP_GERUND)); - let dals = get_dals(shell, &args.selected_dals)?; + let dals = get_dals(shell, &args.selected_dals, &args.urls)?; for dal in dals { setup_database(shell, &ecosystem_config.link_to_code, dal)?; } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs index fa0f4cef7bfe..fc55ed2c1f6f 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs @@ -6,24 +6,25 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use crate::{ - commands::lint_utils::{get_unignored_files, Extension}, + commands::lint_utils::{get_unignored_files, Target}, messages::{ msg_running_fmt_for_extension_spinner, msg_running_fmt_for_extensions_spinner, msg_running_rustfmt_for_dir_spinner, MSG_RUNNING_CONTRACTS_FMT_SPINNER, }, }; -async fn prettier(shell: Shell, extension: Extension, check: bool) -> anyhow::Result<()> { - let spinner = Spinner::new(&msg_running_fmt_for_extension_spinner(extension)); - let files = get_unignored_files(&shell, &extension)?; +async fn prettier(shell: Shell, target: Target, check: bool) -> anyhow::Result<()> { + let spinner = Spinner::new(&msg_running_fmt_for_extension_spinner(target)); + let files = get_unignored_files(&shell, &target)?; if files.is_empty() { + logger::info(format!("No files for {target} found")); return Ok(()); } spinner.freeze(); let mode = if check { "--check" } else { "--write" }; - let config = format!("etc/prettier-config/{extension}.js"); + let config = format!("etc/prettier-config/{target}.js"); Ok( Cmd::new(cmd!(shell, "yarn --silent prettier {mode} --config {config}").args(files)) .run()?, @@ -68,7 +69,7 @@ pub enum Formatter { Contract, Prettier { #[arg(short, long)] - extensions: Vec, + targets: Vec, }, } @@ -85,8 +86,7 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> { match args.formatter { None => { let mut tasks = vec![]; - let extensions: Vec<_> = - vec![Extension::Js, Extension::Ts, Extension::Md, Extension::Sol]; + let 
extensions: Vec<_> = vec![Target::Js, Target::Ts, Target::Md, Target::Sol]; let spinner = Spinner::new(&msg_running_fmt_for_extensions_spinner(&extensions)); spinner.freeze(); for ext in extensions { @@ -108,13 +108,13 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> { } }); } - Some(Formatter::Prettier { mut extensions }) => { - if extensions.is_empty() { - extensions = vec![Extension::Js, Extension::Ts, Extension::Md, Extension::Sol]; + Some(Formatter::Prettier { mut targets }) => { + if targets.is_empty() { + targets = vec![Target::Js, Target::Ts, Target::Md, Target::Sol]; } - let spinner = Spinner::new(&msg_running_fmt_for_extensions_spinner(&extensions)); - for ext in extensions { - prettier(shell.clone(), ext, args.check).await? + let spinner = Spinner::new(&msg_running_fmt_for_extensions_spinner(&targets)); + for target in targets { + prettier(shell.clone(), target, args.check).await? } spinner.finish() } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs index 17c8680f1d24..1861d164ce44 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs @@ -4,7 +4,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use crate::{ - commands::lint_utils::{get_unignored_files, Extension}, + commands::lint_utils::{get_unignored_files, Target}, messages::{ msg_running_linter_for_extension_spinner, msg_running_linters_for_files, MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, @@ -17,31 +17,32 @@ const CONFIG_PATH: &str = "etc/lint-config"; pub struct LintArgs { #[clap(long, short = 'c')] pub check: bool, - #[clap(long, short = 'e')] - pub extensions: Vec, + #[clap(long, short = 't')] + pub targets: Vec, } pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { - let extensions = if args.extensions.is_empty() { + let targets = if args.targets.is_empty() { vec![ - Extension::Rs, - Extension::Md, - Extension::Sol, - Extension::Js, - Extension::Ts, + Target::Rs, + Target::Md, + Target::Sol, + Target::Js, + Target::Ts, + Target::Contracts, ] } else { - args.extensions.clone() + args.targets.clone() }; - logger::info(msg_running_linters_for_files(&extensions)); + logger::info(msg_running_linters_for_files(&targets)); let ecosystem = EcosystemConfig::from_file(shell)?; - for extension in extensions { - match extension { - Extension::Rs => lint_rs(shell, &ecosystem, args.check)?, - Extension::Sol => lint_contracts(shell, &ecosystem, args.check)?, + for target in targets { + match target { + Target::Rs => lint_rs(shell, &ecosystem, args.check)?, + Target::Contracts => lint_contracts(shell, &ecosystem, args.check)?, ext => lint(shell, &ecosystem, &ext, args.check)?, } } @@ -50,7 +51,7 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { } fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> { - let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(&Extension::Rs)); + let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(&Target::Rs)); let link_to_code = &ecosystem.link_to_code; let lint_to_prover = &ecosystem.link_to_code.join("prover"); @@ -61,14 +62,7 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R for path in paths { let _dir_guard = shell.push_dir(path); let mut cmd = cmd!(shell, "cargo clippy"); - let common_args = &[ - "--locked", - "--", - "-D", - "warnings", - "-D", - "unstable_features", - ]; 
+ let common_args = &["--locked", "--", "-D", "warnings"]; if !check { cmd = cmd.args(&["--fix", "--allow-dirty"]); } @@ -79,34 +73,35 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R Ok(()) } -fn get_linter(extension: &Extension) -> Vec { - match extension { - Extension::Rs => vec!["cargo".to_string(), "clippy".to_string()], - Extension::Md => vec!["markdownlint".to_string()], - Extension::Sol => vec!["solhint".to_string()], - Extension::Js => vec!["eslint".to_string()], - Extension::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()], +fn get_linter(target: &Target) -> Vec { + match target { + Target::Rs => vec!["cargo".to_string(), "clippy".to_string()], + Target::Md => vec!["markdownlint".to_string()], + Target::Sol => vec!["solhint".to_string()], + Target::Js => vec!["eslint".to_string()], + Target::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()], + Target::Contracts => vec![], } } fn lint( shell: &Shell, ecosystem: &EcosystemConfig, - extension: &Extension, + target: &Target, check: bool, ) -> anyhow::Result<()> { - let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(extension)); + let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(target)); let _dir_guard = shell.push_dir(&ecosystem.link_to_code); - let files = get_unignored_files(shell, extension)?; + let files = get_unignored_files(shell, target)?; let cmd = cmd!(shell, "yarn"); let config_path = ecosystem.link_to_code.join(CONFIG_PATH); - let config_path = config_path.join(format!("{}.js", extension)); + let config_path = config_path.join(format!("{}.js", target)); let config_path = config_path .to_str() .expect(MSG_LINT_CONFIG_PATH_ERR) .to_string(); - let linter = get_linter(extension); + let linter = get_linter(target); let fix_option = if check { vec![] @@ -128,8 +123,6 @@ fn lint( } fn lint_contracts(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::Result<()> { - lint(shell, ecosystem, &Extension::Sol, check)?; - let spinner = Spinner::new(MSG_RUNNING_CONTRACTS_LINTER_SPINNER); let _dir_guard = shell.push_dir(&ecosystem.link_to_code); let cmd = cmd!(shell, "yarn"); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs index 92fac6ea815f..a7236dc04fb3 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs @@ -1,55 +1,37 @@ use clap::ValueEnum; +use serde::{Deserialize, Serialize}; use strum::EnumIter; use xshell::{cmd, Shell}; -const IGNORED_DIRS: [&str; 18] = [ - "target", - "node_modules", - "volumes", - "build", - "dist", - ".git", - "generated", - "grafonnet-lib", - "prettier-config", - "lint-config", - "cache", - "artifacts", - "typechain", - "binaryen", - "system-contracts", - "artifacts-zk", - "cache-zk", - // Ignore directories with OZ and forge submodules. 
- "contracts/l1-contracts/lib", -]; - -const IGNORED_FILES: [&str; 4] = [ - "KeysWithPlonkVerifier.sol", - "TokenInit.sol", - ".tslintrc.js", - ".prettierrc.js", -]; +const IGNORE_FILE: &str = "etc/lint-config/ignore.yaml"; #[derive(Debug, ValueEnum, EnumIter, strum::Display, PartialEq, Eq, Clone, Copy)] #[strum(serialize_all = "lowercase")] -pub enum Extension { +pub enum Target { Md, Sol, Js, Ts, Rs, + Contracts, +} + +#[derive(Deserialize, Serialize, Debug)] +struct IgnoredData { + files: Vec, + dirs: Vec, } -pub fn get_unignored_files(shell: &Shell, extension: &Extension) -> anyhow::Result> { +pub fn get_unignored_files(shell: &Shell, target: &Target) -> anyhow::Result> { let mut files = Vec::new(); + let ignored_files: IgnoredData = serde_yaml::from_str(&shell.read_file(IGNORE_FILE)?)?; let output = cmd!(shell, "git ls-files --recurse-submodules").read()?; for line in output.lines() { let path = line.to_string(); - if !IGNORED_DIRS.iter().any(|dir| path.contains(dir)) - && !IGNORED_FILES.contains(&path.as_str()) - && path.ends_with(&format!(".{}", extension)) + if !ignored_files.dirs.iter().any(|dir| path.contains(dir)) + && !ignored_files.files.contains(&path) + && path.ends_with(&format!(".{}", target)) { files.push(path); } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs index 181ce50c2134..875f2982c959 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -1,8 +1,9 @@ pub mod clean; +pub mod contracts; pub mod database; pub mod fmt; pub mod lint; pub(crate) mod lint_utils; -pub mod prover_version; +pub mod prover; pub mod snapshot; pub mod test; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs new file mode 100644 index 000000000000..e837bbe9eb86 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs @@ -0,0 +1,40 @@ +use clap::Parser; + +#[derive(Debug, Parser)] +pub struct InsertBatchArgs { + #[clap(long)] + pub number: Option, + #[clap(long, default_value = "false")] + pub default: bool, + #[clap(long)] + pub version: Option, +} + +#[derive(Debug)] +pub struct InsertBatchArgsFinal { + pub number: u32, + pub version: String, +} + +impl InsertBatchArgs { + pub(crate) fn fill_values_with_prompts(self, era_version: String) -> InsertBatchArgsFinal { + let number = self.number.unwrap_or_else(|| { + common::Prompt::new("Enter the number of the batch to insert").ask() + }); + + if self.default { + return InsertBatchArgsFinal { + number, + version: era_version, + }; + } + + let version = self.version.unwrap_or_else(|| { + common::Prompt::new("Enter the version of the batch to insert") + .default(&era_version) + .ask() + }); + + InsertBatchArgsFinal { number, version } + } +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs new file mode 100644 index 000000000000..97e60fb38f8c --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs @@ -0,0 +1,49 @@ +use clap::Parser; + +#[derive(Debug, Parser)] +pub struct InsertVersionArgs { + #[clap(long, default_value = "false")] + pub default: bool, + #[clap(long)] + pub version: Option, + #[clap(long)] + pub snark_wrapper: Option, +} + +#[derive(Debug)] +pub struct InsertVersionArgsFinal { + pub snark_wrapper: 
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
index 181ce50c2134..875f2982c959 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
@@ -1,8 +1,9 @@
 pub mod clean;
+pub mod contracts;
 pub mod database;
 pub mod fmt;
 pub mod lint;
 pub(crate) mod lint_utils;
-pub mod prover_version;
+pub mod prover;
 pub mod snapshot;
 pub mod test;
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs
new file mode 100644
index 000000000000..e837bbe9eb86
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs
@@ -0,0 +1,40 @@
+use clap::Parser;
+
+#[derive(Debug, Parser)]
+pub struct InsertBatchArgs {
+    #[clap(long)]
+    pub number: Option<u32>,
+    #[clap(long, default_value = "false")]
+    pub default: bool,
+    #[clap(long)]
+    pub version: Option<String>,
+}
+
+#[derive(Debug)]
+pub struct InsertBatchArgsFinal {
+    pub number: u32,
+    pub version: String,
+}
+
+impl InsertBatchArgs {
+    pub(crate) fn fill_values_with_prompts(self, era_version: String) -> InsertBatchArgsFinal {
+        let number = self.number.unwrap_or_else(|| {
+            common::Prompt::new("Enter the number of the batch to insert").ask()
+        });
+
+        if self.default {
+            return InsertBatchArgsFinal {
+                number,
+                version: era_version,
+            };
+        }
+
+        let version = self.version.unwrap_or_else(|| {
+            common::Prompt::new("Enter the version of the batch to insert")
+                .default(&era_version)
+                .ask()
+        });
+
+        InsertBatchArgsFinal { number, version }
+    }
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs
new file mode 100644
index 000000000000..97e60fb38f8c
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs
@@ -0,0 +1,49 @@
+use clap::Parser;
+
+#[derive(Debug, Parser)]
+pub struct InsertVersionArgs {
+    #[clap(long, default_value = "false")]
+    pub default: bool,
+    #[clap(long)]
+    pub version: Option<String>,
+    #[clap(long)]
+    pub snark_wrapper: Option<String>,
+}
+
+#[derive(Debug)]
+pub struct InsertVersionArgsFinal {
+    pub snark_wrapper: String,
+    pub version: String,
+}
+
+impl InsertVersionArgs {
+    pub(crate) fn fill_values_with_prompts(
+        self,
+        era_version: String,
+        snark_wrapper: String,
+    ) -> InsertVersionArgsFinal {
+        if self.default {
+            return InsertVersionArgsFinal {
+                snark_wrapper,
+                version: era_version,
+            };
+        }
+
+        let version = self.version.unwrap_or_else(|| {
+            common::Prompt::new("Enter the version of the protocol to insert")
+                .default(&era_version)
+                .ask()
+        });
+
+        let snark_wrapper = self.snark_wrapper.unwrap_or_else(|| {
+            common::Prompt::new("Enter the snark wrapper of the protocol to insert")
+                .default(&snark_wrapper)
+                .ask()
+        });
+
+        InsertVersionArgsFinal {
+            snark_wrapper,
+            version,
+        }
+    }
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs
new file mode 100644
index 000000000000..0984546136c9
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs
@@ -0,0 +1,2 @@
+pub mod insert_batch;
+pub mod insert_version;
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs
new file mode 100644
index 000000000000..6a7d7ddeda8a
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs
@@ -0,0 +1,95 @@
+use std::{
+    fs,
+    path::{Path, PathBuf},
+};
+
+use anyhow::Context as _;
+use common::{config::global_config, logger};
+use config::{ChainConfig, EcosystemConfig};
+use xshell::{cmd, Shell};
+
+use crate::messages::MSG_CHAIN_NOT_FOUND_ERR;
+
+pub async fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .expect(MSG_CHAIN_NOT_FOUND_ERR);
+
+    let link_to_code = ecosystem_config.link_to_code;
+    let link_to_prover = link_to_code.join("prover");
+
+    let protocol_version = get_protocol_version(shell, &link_to_prover).await?;
+    let snark_wrapper = get_snark_wrapper(&link_to_prover).await?;
+    let prover_url = get_database_url(&chain_config).await?;
+
+    logger::info(format!(
+        "
+=============================== \n
+Current prover setup information: \n
+Protocol version: {} \n
+Snark wrapper: {} \n
+Database URL: {}\n
+===============================",
+        protocol_version, snark_wrapper, prover_url
+    ));
+
+    Ok(())
+}
+
+pub(crate) async fn get_protocol_version(
+    shell: &Shell,
+    link_to_prover: &PathBuf,
+) -> anyhow::Result<String> {
+    shell.change_dir(link_to_prover);
+    let protocol_version = cmd!(shell, "cargo run --release --bin prover_version").read()?;
+
+    Ok(protocol_version)
+}
+
+pub(crate) async fn get_snark_wrapper(link_to_prover: &Path) -> anyhow::Result<String> {
+    let path = link_to_prover.join("data/keys/commitments.json");
+    let file = fs::File::open(path).expect("Could not find commitments file in zksync-era");
+    let json: serde_json::Value =
+        serde_json::from_reader(file).expect("Could not parse commitments.json");
+
+    let snark_wrapper = json
+        .get("snark_wrapper")
+        .expect("Could not find snark_wrapper in commitments.json");
+
+    let mut snark_wrapper = snark_wrapper.to_string();
+    snark_wrapper.pop();
+    snark_wrapper.remove(0);
+
+    Ok(snark_wrapper)
+}
+
+pub(crate) async fn get_database_url(chain: &ChainConfig) -> anyhow::Result<String> {
+    let prover_url = chain
+        .get_secrets_config()?
+        .database
+        .context("Database secrets not found")?
+        .prover_url()?
+        .expose_url()
+        .to_string();
+    Ok(prover_url)
+}
+
+pub fn parse_version(version: &str) -> anyhow::Result<(&str, &str)> {
+    let splitted: Vec<&str> = version.split(".").collect();
+
+    assert_eq!(splitted.len(), 3, "Invalid version format");
+    assert_eq!(splitted[0], "0", "Invalid major version, expected 0");
+
+    splitted[1]
+        .parse::<u32>()
+        .context("Could not parse minor version")?;
+    splitted[2]
+        .parse::<u32>()
+        .context("Could not parse patch version")?;
+
+    let minor = splitted[1];
+    let patch = splitted[2];
+
+    Ok((minor, patch))
+}
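
For clarity: `parse_version` validates the minor and patch components numerically but returns them as string slices borrowed from the input. A sketch of a unit test pinning that contract (not part of this diff; it assumes the function stays in this module):

    #[cfg(test)]
    mod tests {
        use super::parse_version;

        #[test]
        fn splits_protocol_version_into_minor_and_patch() {
            // A protocol version such as "0.24.2" yields ("24", "2");
            // a non-zero major version would trip the assert in parse_version.
            assert_eq!(parse_version("0.24.2").unwrap(), ("24", "2"));
        }
    }
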
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs
new file mode 100644
index 000000000000..b1c02c9a9fea
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs
@@ -0,0 +1,46 @@
+use common::{
+    check_prerequisites, cmd::Cmd, config::global_config, logger, PROVER_CLI_PREREQUISITE,
+};
+use config::{get_link_to_prover, EcosystemConfig};
+use xshell::{cmd, Shell};
+
+use crate::{
+    commands::prover::{
+        args::insert_batch::{InsertBatchArgs, InsertBatchArgsFinal},
+        info,
+    },
+    messages::MSG_CHAIN_NOT_FOUND_ERR,
+};
+
+pub async fn run(shell: &Shell, args: InsertBatchArgs) -> anyhow::Result<()> {
+    check_prerequisites(shell, &PROVER_CLI_PREREQUISITE, false);
+
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .expect(MSG_CHAIN_NOT_FOUND_ERR);
+
+    let version = info::get_protocol_version(shell, &get_link_to_prover(&ecosystem_config)).await?;
+    let prover_url = info::get_database_url(&chain_config).await?;
+
+    let InsertBatchArgsFinal { number, version } = args.fill_values_with_prompts(version);
+
+    let (minor, patch) = info::parse_version(&version)?;
+
+    logger::info(format!(
+        "Inserting protocol version {}, batch number {} into the database",
+        version, number
+    ));
+
+    let number = number.to_string();
+
+    let cmd = Cmd::new(cmd!(
+        shell,
+        "prover_cli {prover_url} insert-batch --version={minor} --patch={patch} --number={number}"
+    ));
+    cmd.run()?;
+
+    logger::info("Done.");
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs
new file mode 100644
index 000000000000..16bbdf13df4f
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs
@@ -0,0 +1,46 @@
+use common::{
+    check_prerequisites, cmd::Cmd, config::global_config, logger, PROVER_CLI_PREREQUISITE,
+};
+use config::{get_link_to_prover, EcosystemConfig};
+use xshell::{cmd, Shell};
+
+use crate::{
+    commands::prover::{
+        args::insert_version::{InsertVersionArgs, InsertVersionArgsFinal},
+        info,
+    },
+    messages::MSG_CHAIN_NOT_FOUND_ERR,
+};
+
+pub async fn run(shell: &Shell, args: InsertVersionArgs) -> anyhow::Result<()> {
+    check_prerequisites(shell, &PROVER_CLI_PREREQUISITE, false);
+
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .expect(MSG_CHAIN_NOT_FOUND_ERR);
+
+    let version = info::get_protocol_version(shell, &get_link_to_prover(&ecosystem_config)).await?;
+    let snark_wrapper = info::get_snark_wrapper(&get_link_to_prover(&ecosystem_config)).await?;
+
+    let prover_url = info::get_database_url(&chain_config).await?;
+
+    let InsertVersionArgsFinal {
+        version,
+        snark_wrapper,
+    } = args.fill_values_with_prompts(version, snark_wrapper);
+
+    let (minor, patch) = info::parse_version(&version)?;
+
+    logger::info(format!(
+        "Inserting protocol version {}, snark wrapper {} into the database",
+        version, snark_wrapper
+    ));
+
+    let cmd = Cmd::new(cmd!(shell, "prover_cli {prover_url} insert-version --version={minor} --patch={patch} --snark-wrapper={snark_wrapper}"));
+    cmd.run()?;
+
+    logger::info("Done.");
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs
new file mode 100644
index 000000000000..364f8fe93efc
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs
@@ -0,0 +1,22 @@
+use clap::Subcommand;
+use xshell::Shell;
+
+mod args;
+pub mod info;
+pub mod insert_batch;
+pub mod insert_version;
+
+#[derive(Subcommand, Debug)]
+pub enum ProverCommands {
+    Info,
+    InsertBatch(args::insert_batch::InsertBatchArgs),
+    InsertVersion(args::insert_version::InsertVersionArgs),
+}
+
+pub async fn run(shell: &Shell, args: ProverCommands) -> anyhow::Result<()> {
+    match args {
+        ProverCommands::Info => info::run(shell).await,
+        ProverCommands::InsertBatch(args) => insert_batch::run(shell, args).await,
+        ProverCommands::InsertVersion(args) => insert_version::run(shell, args).await,
+    }
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs
deleted file mode 100644
index 479f796294fa..000000000000
--- a/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use std::{fs, path::Path};
-
-use common::logger;
-use config::EcosystemConfig;
-use xshell::{cmd, Shell};
-
-pub async fn run(shell: &Shell) -> anyhow::Result<()> {
-    let link_to_code = EcosystemConfig::from_file(shell)?.link_to_code;
-    let link_to_prover = link_to_code.join("prover");
-
-    let protocol_version = get_protocol_version(shell, &link_to_prover).await?;
-    let snark_wrapper = get_snark_wrapper(&link_to_prover).await?;
-
-    logger::info(format!(
-        "Current protocol version found in zksync-era: {}, snark_wrapper: {}",
-        protocol_version, snark_wrapper
-    ));
-
-    Ok(())
-}
-
-async fn get_protocol_version(shell: &Shell, link_to_prover: &Path) -> anyhow::Result<String> {
-    shell.change_dir(link_to_prover);
-    let protocol_version = cmd!(shell, "cargo run --release --bin prover_version").read()?;
-
-    Ok(protocol_version)
-}
-
-async fn get_snark_wrapper(link_to_prover: &Path) -> anyhow::Result<String> {
-    let path =
-        link_to_prover.join("crates/bin/vk_setup_data_generator_server_fri/data/commitments.json");
-    let file = fs::File::open(path).expect("Could not find commitments file in zksync-era");
-    let json: serde_json::Value =
-        serde_json::from_reader(file).expect("Could not parse commitments.json");
-
-    let snark_wrapper = json
-        .get("snark_wrapper")
-        .expect("Could not find snark_wrapper in commitments.json");
-
-    Ok(snark_wrapper.to_string())
-}
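
The `--default` flag on both insert commands skips the prompts and reuses the values detected from the repository. A sketch of that non-interactive path for `InsertBatchArgs` (illustration only, not part of this diff; it assumes a test module placed next to the `args` module inside `prover/mod.rs`):

    #[cfg(test)]
    mod tests {
        use super::args::insert_batch::InsertBatchArgs;

        #[test]
        fn default_flag_reuses_detected_era_version() {
            let args = InsertBatchArgs {
                number: Some(42), // provided, so no prompt is shown
                default: true,    // take the detected version as-is
                version: None,
            };
            let final_args = args.fill_values_with_prompts("0.24.2".to_string());
            assert_eq!(final_args.number, 42);
            assert_eq!(final_args.version, "0.24.2");
        }
    }
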
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs b/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs
index aac9f5345d42..4ec44579aaf5 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs
@@ -1,6 +1,6 @@
 use anyhow::Context;
 use clap::Subcommand;
-use common::{cmd::Cmd, logger};
+use common::{cmd::Cmd, config::global_config, logger};
 use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
@@ -24,7 +24,7 @@ pub(crate) async fn run(shell: &Shell, args: SnapshotCommands) -> anyhow::Result
 async fn create(shell: &Shell) -> anyhow::Result<()> {
     let ecosystem = EcosystemConfig::from_file(shell)?;
     let chain = ecosystem
-        .load_chain(Some(ecosystem.default_chain.clone()))
+        .load_chain(global_config().chain_name.clone())
         .context(MSG_CHAIN_NOT_FOUND_ERR)?;
 
     let config_path = chain.path_to_general_config();
@@ -36,5 +36,5 @@ async fn create(shell: &Shell) -> anyhow::Result<()> {
         .env("RUST_LOG", "snapshots_creator=debug");
 
     cmd = cmd.with_force_run();
-    cmd.run().context("MSG")
+    cmd.run().context("Snapshot")
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
index a41ccf3d48df..435dddfc360c 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
@@ -1,10 +1,14 @@
 use clap::Parser;
 use serde::{Deserialize, Serialize};
 
-use crate::messages::MSG_TESTS_EXTERNAL_NODE_HELP;
+use crate::messages::{MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP};
 
 #[derive(Debug, Serialize, Deserialize, Parser)]
 pub struct IntegrationArgs {
     #[clap(short, long, help = MSG_TESTS_EXTERNAL_NODE_HELP)]
     pub external_node: bool,
+    #[clap(short, long, help = MSG_NO_DEPS_HELP)]
+    pub no_deps: bool,
+    #[clap(short, long, help = MSG_TEST_PATTERN_HELP)]
+    pub test_pattern: Option<String>,
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs
index ddd5c5588a0c..d74d5e64a7d5 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs
@@ -2,3 +2,4 @@ pub mod integration;
 pub mod recovery;
 pub mod revert;
 pub mod rust;
+pub mod upgrade;
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs
index 3bddc6bce1f1..81cc58fbd9bd 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs
@@ -1,10 +1,14 @@
 use clap::Parser;
 use serde::{Deserialize, Serialize};
 
-use crate::messages::MSG_TESTS_RECOVERY_SNAPSHOT_HELP;
+use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_TESTS_RECOVERY_SNAPSHOT_HELP};
 
 #[derive(Debug, Serialize, Deserialize, Parser)]
 pub struct RecoveryArgs {
     #[clap(short, long, help = MSG_TESTS_RECOVERY_SNAPSHOT_HELP)]
     pub snapshot: bool,
+    #[clap(short, long, help = MSG_NO_DEPS_HELP)]
+    pub no_deps: bool,
+    #[clap(short, long, help = MSG_NO_KILL_HELP)]
+    pub no_kill: bool,
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs
index e4305b6796c2..0154a4c0afd7 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs
@@ -1,6 +1,9 @@
 use clap::Parser;
 
-use crate::messages::{MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP};
+use crate::messages::{
+    MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP,
+    MSG_TESTS_EXTERNAL_NODE_HELP,
+};
 
 #[derive(Debug, Parser)]
 pub struct RevertArgs {
@@ -8,4 +11,8 @@ pub struct RevertArgs {
     pub enable_consensus: bool,
     #[clap(short, long, help = MSG_TESTS_EXTERNAL_NODE_HELP)]
     pub external_node: bool,
+    #[clap(short, long, help = MSG_NO_DEPS_HELP)]
+    pub no_deps: bool,
+    #[clap(short, long, help = MSG_NO_KILL_HELP)]
+    pub no_kill: bool,
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs
new file mode 100644
index 000000000000..dd96957e9d3b
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs
@@ -0,0 +1,9 @@
+use clap::Parser;
+
+use crate::messages::MSG_NO_DEPS_HELP;
+
+#[derive(Debug, Parser)]
+pub struct UpgradeArgs {
+    #[clap(short, long, help = MSG_NO_DEPS_HELP)]
+    pub no_deps: bool,
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs
new file mode 100644
index 000000000000..f48967f59738
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs
@@ -0,0 +1,13 @@
+use config::EcosystemConfig;
+use xshell::Shell;
+
+use super::utils::{build_contracts, install_and_build_dependencies};
+
+pub fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    build_contracts(shell, &ecosystem_config)?;
+    install_and_build_dependencies(shell, &ecosystem_config)?;
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
index f44559fe4e07..fb3e1436acc3 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
@@ -1,31 +1,56 @@
-use common::{cmd::Cmd, config::global_config, logger, spinner::Spinner};
+use std::path::PathBuf;
+
+use anyhow::Context;
+use common::{cmd::Cmd, config::global_config, logger};
 use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
-use super::args::integration::IntegrationArgs;
+use super::{
+    args::integration::IntegrationArgs,
+    utils::{build_contracts, install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH},
+};
 use crate::messages::{
-    msg_integration_tests_run, MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS,
-    MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES, MSG_INTEGRATION_TESTS_RUN_SUCCESS,
+    msg_integration_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR,
+    MSG_INTEGRATION_TESTS_RUN_SUCCESS,
 };
 
 const TS_INTEGRATION_PATH: &str = "core/tests/ts-integration";
-const CONTRACTS_TEST_DATA_PATH: &str = "etc/contracts-test-data";
 
-pub fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> {
+pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .expect(MSG_CHAIN_NOT_FOUND_ERR);
     shell.change_dir(ecosystem_config.link_to_code.join(TS_INTEGRATION_PATH));
 
     logger::info(msg_integration_tests_run(args.external_node));
 
-    build_repository(shell, &ecosystem_config)?;
-    build_test_contracts(shell, &ecosystem_config)?;
+    if !args.no_deps {
+        build_contracts(shell, &ecosystem_config)?;
+        install_and_build_dependencies(shell, &ecosystem_config)?;
+    }
+
+    let wallets_path: PathBuf = ecosystem_config.link_to_code.join(TEST_WALLETS_PATH);
+    let wallets: TestWallets = serde_json::from_str(shell.read_file(&wallets_path)?.as_ref())
+        .context(MSG_DESERIALIZE_TEST_WALLETS_ERR)?;
 
-    let mut command = cmd!(shell, "yarn jest --forceExit --testTimeout 60000")
-        .env("CHAIN_NAME", ecosystem_config.default_chain);
+    wallets
+        .init_test_wallet(&ecosystem_config, &chain_config)
+        .await?;
+
+    let test_pattern = args.test_pattern;
+    let mut command = cmd!(
+        shell,
+        "yarn jest --forceExit --testTimeout 120000 -t {test_pattern...}"
+    )
+    .env("CHAIN_NAME", ecosystem_config.current_chain())
+    .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?);
 
     if args.external_node {
         command = command.env("EXTERNAL_NODE", format!("{:?}", args.external_node))
     }
+
     if global_config().verbose {
         command = command.env(
             "ZKSYNC_DEBUG_LOGS",
@@ -39,27 +64,3 @@ pub fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> {
 
     Ok(())
 }
-
-fn build_repository(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> {
-    let _dir_guard = shell.push_dir(&ecosystem_config.link_to_code);
-    let spinner = Spinner::new(MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES);
-
-    Cmd::new(cmd!(shell, "yarn install --frozen-lockfile")).run()?;
-    Cmd::new(cmd!(shell, "yarn utils build")).run()?;
-
-    spinner.finish();
-    Ok(())
-}
-
-fn build_test_contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> {
-    let spinner = Spinner::new(MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS);
-
-    Cmd::new(cmd!(shell, "yarn build")).run()?;
-    Cmd::new(cmd!(shell, "yarn build-yul")).run()?;
-
-    let _dir_guard = shell.push_dir(ecosystem_config.link_to_code.join(CONTRACTS_TEST_DATA_PATH));
-    Cmd::new(cmd!(shell, "yarn build")).run()?;
-
-    spinner.finish();
-    Ok(())
-}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs
new file mode 100644
index 000000000000..5a2a87871b58
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs
@@ -0,0 +1,52 @@
+use anyhow::Context;
+use common::{cmd::Cmd, config::global_config, logger};
+use config::EcosystemConfig;
+use xshell::{cmd, Shell};
+
+use crate::messages::MSG_CHAIN_NOT_FOUND_ERR;
+
+pub fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
+
+    let general_api = chain_config
+        .get_general_config()?
+        .api_config
+        .context("API config is not found")?;
+
+    let mut command = cmd!(shell, "cargo run --release --bin loadnext")
+        .env(
+            "L2_CHAIN_ID",
+            chain_config
+                .get_genesis_config()?
+                .l2_chain_id
+                .as_u64()
+                .to_string(),
+        )
+        .env(
+            "MAIN_TOKEN",
+            format!(
+                "{:?}",
+                ecosystem_config
+                    .get_erc20_tokens()
+                    .first()
+                    .context("NO Erc20 tokens were deployed")?
+                    .address
+            ),
+        )
+        .env("L2_RPC_ADDRESS", general_api.web3_json_rpc.http_url)
+        .env("L2_WS_RPC_ADDRESS", general_api.web3_json_rpc.ws_url);
+
+    if global_config().verbose {
+        command = command.env("RUST_LOG", "loadnext=info")
+    }
+
+    Cmd::new(command).with_force_run().run()?;
+
+    logger::outro("Loadtest success");
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs
index 70177888d1d5..712e2f75eefd 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs
@@ -1,22 +1,28 @@
 use args::{
     integration::IntegrationArgs, recovery::RecoveryArgs, revert::RevertArgs, rust::RustArgs,
+    upgrade::UpgradeArgs,
 };
 use clap::Subcommand;
 use xshell::Shell;
 
 use crate::messages::{
-    MSG_INTEGRATION_TESTS_ABOUT, MSG_L1_CONTRACTS_ABOUT, MSG_PROVER_TEST_ABOUT,
-    MSG_RECOVERY_TEST_ABOUT, MSG_REVERT_TEST_ABOUT, MSG_RUST_TEST_ABOUT, MSG_UPGRADE_TEST_ABOUT,
+    MSG_BUILD_ABOUT, MSG_INTEGRATION_TESTS_ABOUT, MSG_L1_CONTRACTS_ABOUT, MSG_LOADTEST_ABOUT,
+    MSG_PROVER_TEST_ABOUT, MSG_RECOVERY_TEST_ABOUT, MSG_REVERT_TEST_ABOUT, MSG_RUST_TEST_ABOUT,
+    MSG_TEST_WALLETS_INFO, MSG_UPGRADE_TEST_ABOUT,
 };
 
 mod args;
+mod build;
 mod integration;
 mod l1_contracts;
+mod loadtest;
 mod prover;
 mod recovery;
 mod revert;
 mod rust;
 mod upgrade;
+mod utils;
+mod wallet;
 
 #[derive(Subcommand, Debug)]
 pub enum TestCommands {
@@ -27,23 +33,32 @@ pub enum TestCommands {
     #[clap(about = MSG_RECOVERY_TEST_ABOUT, alias = "rec")]
     Recovery(RecoveryArgs),
     #[clap(about = MSG_UPGRADE_TEST_ABOUT, alias = "u")]
-    Upgrade,
+    Upgrade(UpgradeArgs),
+    #[clap(about = MSG_BUILD_ABOUT)]
+    Build,
     #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit")]
     Rust(RustArgs),
     #[clap(about = MSG_L1_CONTRACTS_ABOUT, alias = "l1")]
     L1Contracts,
     #[clap(about = MSG_PROVER_TEST_ABOUT, alias = "p")]
     Prover,
+    #[clap(about = MSG_TEST_WALLETS_INFO)]
+    Wallet,
+    #[clap(about = MSG_LOADTEST_ABOUT)]
+    Loadtest,
 }
 
 pub async fn run(shell: &Shell, args: TestCommands) -> anyhow::Result<()> {
     match args {
-        TestCommands::Integration(args) => integration::run(shell, args),
-        TestCommands::Revert(args) => revert::run(shell, args),
-        TestCommands::Recovery(args) => recovery::run(shell, args),
-        TestCommands::Upgrade => upgrade::run(shell),
+        TestCommands::Integration(args) => integration::run(shell, args).await,
+        TestCommands::Revert(args) => revert::run(shell, args).await,
+        TestCommands::Recovery(args) => recovery::run(shell, args).await,
+        TestCommands::Upgrade(args) => upgrade::run(shell, args),
+        TestCommands::Build => build::run(shell),
         TestCommands::Rust(args) => rust::run(shell, args).await,
         TestCommands::L1Contracts => l1_contracts::run(shell),
         TestCommands::Prover => prover::run(shell),
+        TestCommands::Wallet => wallet::run(shell),
+        TestCommands::Loadtest => loadtest::run(shell),
     }
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs
index fdde6a61f896..c69a9826d56c 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs
@@ -1,43 +1,47 @@
-use common::{cmd::Cmd, logger, server::Server, spinner::Spinner};
+use std::path::PathBuf;
+
+use anyhow::Context;
+use common::{cmd::Cmd, config::global_config, logger, server::Server, spinner::Spinner};
 use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
-use super::args::recovery::RecoveryArgs;
-use crate::messages::{MSG_RECOVERY_TEST_RUN_INFO, MSG_RECOVERY_TEST_RUN_SUCCESS};
+use super::{
+    args::recovery::RecoveryArgs,
+    utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH},
+};
+use crate::messages::{
+    MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_RECOVERY_TEST_RUN_INFO,
+    MSG_RECOVERY_TEST_RUN_SUCCESS,
+};
 
 const RECOVERY_TESTS_PATH: &str = "core/tests/recovery-test";
 
-pub fn run(shell: &Shell, args: RecoveryArgs) -> anyhow::Result<()> {
+pub async fn run(shell: &Shell, args: RecoveryArgs) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     shell.change_dir(ecosystem_config.link_to_code.join(RECOVERY_TESTS_PATH));
 
     logger::info(MSG_RECOVERY_TEST_RUN_INFO);
 
-    Server::new(None, ecosystem_config.link_to_code.clone()).build(shell)?;
-    install_and_build_dependencies(shell, &ecosystem_config)?;
-    run_test(shell, &args, &ecosystem_config)?;
-    logger::outro(MSG_RECOVERY_TEST_RUN_SUCCESS);
+    Server::new(None, ecosystem_config.link_to_code.clone(), false).build(shell)?;
 
-    Ok(())
-}
+    if !args.no_deps {
+        install_and_build_dependencies(shell, &ecosystem_config)?;
+    }
+
+    run_test(shell, &args, &ecosystem_config).await?;
+    logger::outro(MSG_RECOVERY_TEST_RUN_SUCCESS);
 
-fn install_and_build_dependencies(
-    shell: &Shell,
-    ecosystem_config: &EcosystemConfig,
-) -> anyhow::Result<()> {
-    let _dir_guard = shell.push_dir(&ecosystem_config.link_to_code);
-    let spinner = Spinner::new("Installing and building dependencies...");
-    Cmd::new(cmd!(shell, "yarn install")).run()?;
-    Cmd::new(cmd!(shell, "yarn utils build")).run()?;
-    spinner.finish();
     Ok(())
 }
 
-fn run_test(
+async fn run_test(
     shell: &Shell,
     args: &RecoveryArgs,
     ecosystem_config: &EcosystemConfig,
 ) -> anyhow::Result<()> {
     Spinner::new("Running test...").freeze();
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
 
     let cmd = if args.snapshot {
         cmd!(shell, "yarn mocha tests/snapshot-recovery.test.ts")
@@ -45,7 +49,19 @@ fn run_test(
         cmd!(shell, "yarn mocha tests/genesis-recovery.test.ts")
     };
 
-    let cmd = Cmd::new(cmd).env("CHAIN_NAME", &ecosystem_config.default_chain);
+    let wallets_path: PathBuf = ecosystem_config.link_to_code.join(TEST_WALLETS_PATH);
+    let wallets: TestWallets = serde_json::from_str(shell.read_file(&wallets_path)?.as_ref())
+        .context(MSG_DESERIALIZE_TEST_WALLETS_ERR)?;
+
+    wallets
+        .init_test_wallet(ecosystem_config, &chain_config)
+        .await?;
+
+    let cmd = Cmd::new(cmd)
+        .env("CHAIN_NAME", ecosystem_config.current_chain())
+        .env("NO_KILL", args.no_kill.to_string())
+        .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?);
+
     cmd.with_force_run().run()?;
 
     Ok(())
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs
index eead83303eed..97794efeb3e1 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs
@@ -1,54 +1,66 @@
-use common::{cmd::Cmd, logger, spinner::Spinner};
+use std::path::PathBuf;
+
+use anyhow::Context;
+use common::{cmd::Cmd, config::global_config, logger, spinner::Spinner};
 use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
-use super::args::revert::RevertArgs;
+use super::{
+    args::revert::RevertArgs,
+    utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH},
+};
 use crate::messages::{
-    msg_revert_tests_run, MSG_REVERT_TEST_INSTALLING_DEPENDENCIES, MSG_REVERT_TEST_RUN_INFO,
-    MSG_REVERT_TEST_RUN_SUCCESS,
+    msg_revert_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR,
+    MSG_REVERT_TEST_RUN_INFO, MSG_REVERT_TEST_RUN_SUCCESS,
 };
 
 const REVERT_TESTS_PATH: &str = "core/tests/revert-test";
 
-pub fn run(shell: &Shell, args: RevertArgs) -> anyhow::Result<()> {
+pub async fn run(shell: &Shell, args: RevertArgs) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     shell.change_dir(ecosystem_config.link_to_code.join(REVERT_TESTS_PATH));
 
     logger::info(MSG_REVERT_TEST_RUN_INFO);
 
-    install_and_build_dependencies(shell, &ecosystem_config)?;
-    run_test(shell, &args, &ecosystem_config)?;
-    logger::outro(MSG_REVERT_TEST_RUN_SUCCESS);
-    Ok(())
-}
+    if !args.no_deps {
+        install_and_build_dependencies(shell, &ecosystem_config)?;
+    }
 
-fn install_and_build_dependencies(
-    shell: &Shell,
-    ecosystem_config: &EcosystemConfig,
-) -> anyhow::Result<()> {
-    let _dir_guard = shell.push_dir(&ecosystem_config.link_to_code);
-    let spinner = Spinner::new(MSG_REVERT_TEST_INSTALLING_DEPENDENCIES);
-    Cmd::new(cmd!(shell, "yarn install")).run()?;
-    Cmd::new(cmd!(shell, "yarn utils build")).run()?;
+    run_test(shell, &args, &ecosystem_config).await?;
+    logger::outro(MSG_REVERT_TEST_RUN_SUCCESS);
 
-    spinner.finish();
     Ok(())
 }
 
-fn run_test(
+async fn run_test(
     shell: &Shell,
     args: &RevertArgs,
     ecosystem_config: &EcosystemConfig,
 ) -> anyhow::Result<()> {
     Spinner::new(&msg_revert_tests_run(args.external_node)).freeze();
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
+
+    let wallets_path: PathBuf = ecosystem_config.link_to_code.join(TEST_WALLETS_PATH);
+    let wallets: TestWallets = serde_json::from_str(shell.read_file(&wallets_path)?.as_ref())
+        .context(MSG_DESERIALIZE_TEST_WALLETS_ERR)?;
+
+    wallets
+        .init_test_wallet(ecosystem_config, &chain_config)
+        .await?;
+
     let cmd = if args.external_node {
         cmd!(shell, "yarn mocha tests/revert-and-restart-en.test.ts")
     } else {
         cmd!(shell, "yarn mocha tests/revert-and-restart.test.ts")
     };
 
-    let mut cmd = Cmd::new(cmd).env("CHAIN_NAME", &ecosystem_config.default_chain);
+    let mut cmd = Cmd::new(cmd)
+        .env("CHAIN_NAME", ecosystem_config.current_chain())
+        .env("NO_KILL", args.no_kill.to_string())
+        .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?);
     if args.enable_consensus {
         cmd = cmd.env("ENABLE_CONSENSUS", "true");
     }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs
index 9134ad08246e..3ac331becc9f 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs
@@ -1,15 +1,19 @@
+use std::{path::Path, str::FromStr};
+
 use anyhow::Context;
 use common::{cmd::Cmd, db::wait_for_db, logger};
 use config::EcosystemConfig;
+use url::Url;
 use xshell::{cmd, Shell};
 
 use super::args::rust::RustArgs;
 use crate::{
     commands::database,
-    dals::get_test_dals,
+    dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH},
+    defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL},
     messages::{
-        MSG_CARGO_NEXTEST_MISSING_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR,
-        MSG_RESETTING_TEST_DATABASES, MSG_UNIT_TESTS_RUN_SUCCESS, MSG_USING_CARGO_NEXTEST,
+        MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, MSG_RESETTING_TEST_DATABASES,
+        MSG_UNIT_TESTS_RUN_SUCCESS, MSG_USING_CARGO_NEXTEST,
     },
 };
 
@@ -19,23 +23,47 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> {
         .clone()
         .load_chain(Some(ecosystem.default_chain))
         .context(MSG_CHAIN_NOT_FOUND_ERR)?;
-    let general_config = chain.get_general_config()?;
-    let postgres = general_config
-        .postgres_config
-        .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?;
-
-    reset_test_databases(shell).await?;
+    let general_config = chain.get_general_config();
+    let link_to_code = ecosystem.link_to_code;
 
-    let _dir_guard = shell.push_dir(&ecosystem.link_to_code);
+    let (test_server_url, test_prover_url) = if let Ok(general_config) = general_config {
+        let postgres = general_config
+            .postgres_config
+            .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?;
 
-    let cmd = if nextest_is_installed(shell)? {
-        logger::info(MSG_USING_CARGO_NEXTEST);
-        cmd!(shell, "cargo nextest run --release")
+        (
+            postgres
+                .test_server_url
+                .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?,
+            postgres
+                .test_prover_url
+                .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?,
+        )
     } else {
-        logger::error(MSG_CARGO_NEXTEST_MISSING_ERR);
-        cmd!(shell, "cargo test --release")
+        (
+            TEST_DATABASE_SERVER_URL.to_string(),
+            TEST_DATABASE_PROVER_URL.to_string(),
+        )
     };
 
+    let dals = vec![
+        Dal {
+            url: Url::from_str(&test_server_url.clone())?,
+            path: CORE_DAL_PATH.to_string(),
+        },
+        Dal {
+            url: Url::from_str(&test_prover_url.clone())?,
+            path: PROVER_DAL_PATH.to_string(),
+        },
+    ];
+
+    reset_test_databases(shell, &link_to_code, dals).await?;
+
+    let _dir_guard = shell.push_dir(&link_to_code);
+
+    logger::info(MSG_USING_CARGO_NEXTEST);
+    let cmd = cmd!(shell, "cargo nextest run --release");
+
     let cmd = if let Some(options) = args.options {
         Cmd::new(cmd.args(options.split_whitespace())).with_force_run()
     } else {
@@ -43,36 +71,20 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> {
     };
 
     let cmd = cmd
-        .env(
-            "TEST_DATABASE_URL",
-            postgres
-                .test_server_url
-                .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?,
-        )
-        .env(
-            "TEST_PROVER_DATABASE_URL",
-            postgres
-                .test_prover_url
-                .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?,
-        );
+        .env("TEST_DATABASE_URL", test_server_url)
+        .env("TEST_PROVER_DATABASE_URL", test_prover_url);
     cmd.run()?;
 
     logger::outro(MSG_UNIT_TESTS_RUN_SUCCESS);
 
     Ok(())
 }
 
-fn nextest_is_installed(shell: &Shell) -> anyhow::Result<bool> {
-    let out = String::from_utf8(
-        Cmd::new(cmd!(shell, "cargo install --list"))
-            .run_with_output()?
-            .stdout,
-    )?;
-    Ok(out.contains("cargo-nextest"))
-}
-
-async fn reset_test_databases(shell: &Shell) -> anyhow::Result<()> {
+async fn reset_test_databases(
+    shell: &Shell,
+    link_to_code: &Path,
+    dals: Vec<Dal>,
+) -> anyhow::Result<()> {
     logger::info(MSG_RESETTING_TEST_DATABASES);
-    let ecosystem = EcosystemConfig::from_file(shell)?;
 
     Cmd::new(cmd!(
         shell,
@@ -85,11 +97,11 @@ async fn reset_test_databases(shell: &Shell) -> anyhow::Result<()> {
     ))
     .run()?;
 
-    for dal in get_test_dals(shell)? {
+    for dal in dals {
         let mut url = dal.url.clone();
         url.set_path("");
         wait_for_db(&url, 3).await?;
-        database::reset::reset_database(shell, ecosystem.link_to_code.clone(), dal.clone()).await?;
+        database::reset::reset_database(shell, link_to_code, dal.clone()).await?;
     }
 
     Ok(())
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs
index 3825ac500fa4..9bd04b81ef34 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs
@@ -2,42 +2,31 @@ use common::{cmd::Cmd, logger, spinner::Spinner};
 use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
-use crate::messages::{
-    MSG_UPGRADE_TEST_INSTALLING_DEPENDENCIES, MSG_UPGRADE_TEST_RUN_INFO,
-    MSG_UPGRADE_TEST_RUN_SUCCESS,
-};
+use super::{args::upgrade::UpgradeArgs, utils::install_and_build_dependencies};
+use crate::messages::{MSG_UPGRADE_TEST_RUN_INFO, MSG_UPGRADE_TEST_RUN_SUCCESS};
 
 const UPGRADE_TESTS_PATH: &str = "core/tests/upgrade-test";
 
-pub fn run(shell: &Shell) -> anyhow::Result<()> {
+pub fn run(shell: &Shell, args: UpgradeArgs) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     shell.change_dir(ecosystem_config.link_to_code.join(UPGRADE_TESTS_PATH));
 
     logger::info(MSG_UPGRADE_TEST_RUN_INFO);
 
-    install_and_build_dependencies(shell, &ecosystem_config)?;
-    run_test(shell, &ecosystem_config)?;
-    logger::outro(MSG_UPGRADE_TEST_RUN_SUCCESS);
-    Ok(())
-}
+    if !args.no_deps {
+        install_and_build_dependencies(shell, &ecosystem_config)?;
+    }
 
-fn install_and_build_dependencies(
-    shell: &Shell,
-    ecosystem_config: &EcosystemConfig,
-) -> anyhow::Result<()> {
-    let _dir_guard = shell.push_dir(&ecosystem_config.link_to_code);
-    let spinner = Spinner::new(MSG_UPGRADE_TEST_INSTALLING_DEPENDENCIES);
-    Cmd::new(cmd!(shell, "yarn install")).run()?;
-    Cmd::new(cmd!(shell, "yarn utils build")).run()?;
+    run_test(shell, &ecosystem_config)?;
+    logger::outro(MSG_UPGRADE_TEST_RUN_SUCCESS);
 
-    spinner.finish();
     Ok(())
 }
 
 fn run_test(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> {
     Spinner::new(MSG_UPGRADE_TEST_RUN_INFO).freeze();
 
     let cmd = Cmd::new(cmd!(shell, "yarn mocha tests/upgrade.test.ts"))
-        .env("CHAIN_NAME", &ecosystem_config.default_chain);
+        .env("CHAIN_NAME", ecosystem_config.current_chain());
     cmd.with_force_run().run()?;
 
     Ok(())
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs
new file mode 100644
index 000000000000..3a5cfd179cc4
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs
@@ -0,0 +1,111 @@
+use std::collections::HashMap;
+
+use anyhow::Context;
+use common::{cmd::Cmd, spinner::Spinner, wallets::Wallet};
+use config::{ChainConfig, EcosystemConfig};
+use ethers::{
+    providers::{Http, Middleware, Provider},
+    utils::hex::ToHex,
+};
+use serde::Deserialize;
+use xshell::{cmd, Shell};
+
+use crate::messages::{
+    MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS, MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES,
+};
+
+pub const TEST_WALLETS_PATH: &str = "etc/test_config/constant/eth.json";
+const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000;
+
+const TS_INTEGRATION_PATH: &str = "core/tests/ts-integration";
+const CONTRACTS_TEST_DATA_PATH: &str = "etc/contracts-test-data";
+
+#[derive(Deserialize)]
+pub struct TestWallets {
+    base_path: String,
+    #[serde(flatten)]
+    wallets: HashMap<String, String>,
+}
+
+impl TestWallets {
+    fn get(&self, id: u32) -> anyhow::Result<Wallet> {
+        let mnemonic = self.wallets.get("test_mnemonic").unwrap().as_str();
+
+        Wallet::from_mnemonic(mnemonic, &self.base_path, id)
+    }
+
+    pub fn get_main_wallet(&self) -> anyhow::Result<Wallet> {
+        self.get(0)
+    }
+
+    pub fn get_test_wallet(&self, chain_config: &ChainConfig) -> anyhow::Result<Wallet> {
+        self.get(chain_config.id)
+    }
+
+    pub fn get_test_pk(&self, chain_config: &ChainConfig) -> anyhow::Result<String> {
+        self.get_test_wallet(chain_config)?
+            .private_key
+            .ok_or(anyhow::Error::msg("Private key not found"))
+            .map(|pk| pk.encode_hex::<String>())
+    }
+
+    pub async fn init_test_wallet(
+        &self,
+        ecosystem_config: &EcosystemConfig,
+        chain_config: &ChainConfig,
+    ) -> anyhow::Result<()> {
+        let wallet = self.get_test_wallet(chain_config)?;
+
+        let l1_rpc = chain_config
+            .get_secrets_config()?
+            .l1
+            .context("No L1 secrets available")?
+            .l1_rpc_url
+            .expose_str()
+            .to_owned();
+
+        let provider = Provider::<Http>::try_from(l1_rpc.clone())?;
+        let balance = provider.get_balance(wallet.address, None).await?;
+
+        if balance.is_zero() {
+            common::ethereum::distribute_eth(
+                self.get_main_wallet()?,
+                vec![wallet.address],
+                l1_rpc,
+                ecosystem_config.l1_network.chain_id(),
+                AMOUNT_FOR_DISTRIBUTION_TO_WALLETS,
+            )
+            .await?
+        }
+
+        Ok(())
+    }
+}
+
+pub fn build_contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> {
+    shell.change_dir(ecosystem_config.link_to_code.join(TS_INTEGRATION_PATH));
+    let spinner = Spinner::new(MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS);
+
+    Cmd::new(cmd!(shell, "yarn build")).run()?;
+    Cmd::new(cmd!(shell, "yarn build-yul")).run()?;
+
+    let _dir_guard = shell.push_dir(ecosystem_config.link_to_code.join(CONTRACTS_TEST_DATA_PATH));
+    Cmd::new(cmd!(shell, "yarn build")).run()?;
+
+    spinner.finish();
+    Ok(())
+}
+
+pub fn install_and_build_dependencies(
+    shell: &Shell,
+    ecosystem_config: &EcosystemConfig,
+) -> anyhow::Result<()> {
+    let _dir_guard = shell.push_dir(&ecosystem_config.link_to_code);
+    let spinner = Spinner::new(MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES);
+
+    Cmd::new(cmd!(shell, "yarn install")).run()?;
+    Cmd::new(cmd!(shell, "yarn utils build")).run()?;
+
+    spinner.finish();
+    Ok(())
+}
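
`TestWallets` flattens every key other than `base_path` into a map and derives per-chain wallets from a single shared mnemonic. A sketch pinning the on-disk shape it expects from `etc/test_config/constant/eth.json` (not part of this diff; the mnemonic and derivation path below are placeholders, and `Wallet::from_mnemonic`'s behavior is assumed from its call site above):

    #[cfg(test)]
    mod tests {
        use super::TestWallets;

        #[test]
        fn deserializes_wallets_fixture_shape() {
            // "test_mnemonic" is the key get() looks up; any extra key lands
            // in the flattened `wallets` map.
            let sample = r#"{
                "base_path": "m/44'/60'/0'/0",
                "test_mnemonic": "test test test test test test test test test test test junk"
            }"#;
            let wallets: TestWallets = serde_json::from_str(sample).unwrap();
            assert!(wallets.get_main_wallet().is_ok());
        }
    }
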
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs
new file mode 100644
index 000000000000..ff5179ab5fec
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs
@@ -0,0 +1,35 @@
+use std::path::PathBuf;
+
+use anyhow::Context;
+use common::{config::global_config, logger};
+use config::EcosystemConfig;
+use xshell::Shell;
+
+use super::utils::{TestWallets, TEST_WALLETS_PATH};
+use crate::messages::{
+    MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_TEST_WALLETS_INFO, MSG_WALLETS_TEST_SUCCESS,
+};
+
+pub fn run(shell: &Shell) -> anyhow::Result<()> {
+    logger::info(MSG_TEST_WALLETS_INFO);
+
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .context("Chain not found")?;
+
+    let wallets_path: PathBuf = ecosystem_config.link_to_code.join(TEST_WALLETS_PATH);
+    let wallets: TestWallets = serde_json::from_str(shell.read_file(wallets_path)?.as_ref())
+        .context(MSG_DESERIALIZE_TEST_WALLETS_ERR)?;
+
+    logger::info(format!("Main: {:#?}", wallets.get_main_wallet()?));
+    logger::info(format!(
+        "Chain: {:#?}",
+        wallets.get_test_wallet(&chain_config)?
+    ));
+
+    logger::outro(MSG_WALLETS_TEST_SUCCESS);
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zk_toolbox/crates/zk_supervisor/src/dals.rs
index 8a68d443ef3d..f9c07585f6dd 100644
--- a/zk_toolbox/crates/zk_supervisor/src/dals.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/dals.rs
@@ -1,15 +1,16 @@
-use std::str::FromStr;
-
 use anyhow::{anyhow, Context};
 use common::config::global_config;
 use config::{EcosystemConfig, SecretsConfig};
 use url::Url;
 use xshell::Shell;
 
-use crate::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_DATABASE_MUST_BE_PRESENTED};
+use crate::{
+    commands::database::args::DalUrls,
+    messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_DATABASE_MUST_BE_PRESENTED},
+};
 
-const CORE_DAL_PATH: &str = "core/lib/dal";
-const PROVER_DAL_PATH: &str = "prover/crates/lib/prover_dal";
+pub const CORE_DAL_PATH: &str = "core/lib/dal";
+pub const PROVER_DAL_PATH: &str = "prover/crates/lib/prover_dal";
 
 #[derive(Debug, Clone)]
 pub struct SelectedDals {
@@ -30,98 +31,63 @@ pub struct Dal {
     pub url: Url,
 }
 
-pub fn get_dals(shell: &Shell, selected_dals: &SelectedDals) -> anyhow::Result<Vec<Dal>> {
+pub fn get_dals(
+    shell: &Shell,
+    selected_dals: &SelectedDals,
+    urls: &DalUrls,
+) -> anyhow::Result<Vec<Dal>> {
     let mut dals = vec![];
 
     if selected_dals.prover {
-        dals.push(get_prover_dal(shell)?);
+        dals.push(get_prover_dal(shell, urls.prover.clone())?);
     }
     if selected_dals.core {
-        dals.push(get_core_dal(shell)?);
+        dals.push(get_core_dal(shell, urls.core.clone())?);
     }
 
     Ok(dals)
 }
 
-pub fn get_test_dals(shell: &Shell) -> anyhow::Result<Vec<Dal>> {
-    Ok(vec![get_test_prover_dal(shell)?, get_test_core_dal(shell)?])
-}
-
-pub fn get_prover_dal(shell: &Shell) -> anyhow::Result<Dal> {
-    let secrets = get_secrets(shell)?;
-
-    Ok(Dal {
-        path: PROVER_DAL_PATH.to_string(),
-        url: secrets
+pub fn get_prover_dal(shell: &Shell, url: Option<String>) -> anyhow::Result<Dal> {
+    let url = if let Some(url) = url {
+        Url::parse(&url)?
+    } else {
+        let secrets = get_secrets(shell)?;
+        secrets
             .database
             .as_ref()
             .context(MSG_DATABASE_MUST_BE_PRESENTED)?
             .prover_url()?
             .expose_url()
-            .clone(),
+            .clone()
+    };
+
+    Ok(Dal {
+        path: PROVER_DAL_PATH.to_string(),
+        url,
     })
 }
 
-pub fn get_core_dal(shell: &Shell) -> anyhow::Result<Dal> {
-    let secrets = get_secrets(shell)?;
-
-    Ok(Dal {
-        path: CORE_DAL_PATH.to_string(),
-        url: secrets
+pub fn get_core_dal(shell: &Shell, url: Option<String>) -> anyhow::Result<Dal> {
+    let url = if let Some(url) = url {
+        Url::parse(&url)?
+    } else {
+        let secrets = get_secrets(shell)?;
+        secrets
            .database
            .as_ref()
            .context(MSG_DATABASE_MUST_BE_PRESENTED)?
            .master_url()?
            .expose_url()
-            .clone(),
-    })
-}
-
-pub fn get_test_core_dal(shell: &Shell) -> anyhow::Result<Dal> {
-    let general_config = get_general_config(shell)?;
-    let postgres = general_config
-        .postgres_config
-        .context(MSG_DATABASE_MUST_BE_PRESENTED)?;
-
-    let url = Url::from_str(
-        &postgres
-            .test_server_url
-            .clone()
-            .context(MSG_DATABASE_MUST_BE_PRESENTED)?,
-    )?;
-    Ok(Dal {
-        path: CORE_DAL_PATH.to_string(),
-        url,
-    })
-}
-
-pub fn get_test_prover_dal(shell: &Shell) -> anyhow::Result<Dal> {
-    let general_config = get_general_config(shell)?;
-    let postgres = general_config
-        .postgres_config
-        .context(MSG_DATABASE_MUST_BE_PRESENTED)?;
-
-    let url = Url::from_str(
-        &postgres
-            .test_prover_url
            .clone()
-            .context(MSG_DATABASE_MUST_BE_PRESENTED)?,
-    )?;
+    };
+
     Ok(Dal {
-        path: PROVER_DAL_PATH.to_string(),
+        path: CORE_DAL_PATH.to_string(),
         url,
     })
 }
 
-fn get_general_config(shell: &Shell) -> anyhow::Result<GeneralConfig> {
-    let ecosystem_config = EcosystemConfig::from_file(shell)?;
-    let chain_config = ecosystem_config
-        .load_chain(global_config().chain_name.clone())
-        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
-    chain_config.get_general_config()
-}
-
 fn get_secrets(shell: &Shell) -> anyhow::Result<SecretsConfig> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     let chain_config = ecosystem_config
diff --git a/zk_toolbox/crates/zk_supervisor/src/defaults.rs b/zk_toolbox/crates/zk_supervisor/src/defaults.rs
new file mode 100644
index 000000000000..f4bae739c2d1
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/defaults.rs
@@ -0,0 +1,4 @@
+pub const TEST_DATABASE_SERVER_URL: &str =
+    "postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test";
+pub const TEST_DATABASE_PROVER_URL: &str =
+    "postgres://postgres:notsecurepassword@localhost:5433/prover_local_test";
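
These fallback URLs point at port 5433 rather than Postgres's default 5432, presumably so the throwaway test databases stay separate from a developer's main local instance. A small sketch of that detail (illustration only, not part of this diff):

    use url::Url;

    fn main() {
        let url =
            Url::parse("postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test")
                .unwrap();
        assert_eq!(url.port(), Some(5433));
        assert_eq!(url.path(), "/zksync_local_test");
    }
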
diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs
index 9a1c1ad74bcd..32aefa7fcad9 100644
--- a/zk_toolbox/crates/zk_supervisor/src/main.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/main.rs
@@ -1,6 +1,7 @@
 use clap::{Parser, Subcommand};
 use commands::{
-    database::DatabaseCommands, lint::LintArgs, snapshot::SnapshotCommands, test::TestCommands,
+    contracts::ContractsArgs, database::DatabaseCommands, lint::LintArgs, prover::ProverCommands,
+    snapshot::SnapshotCommands, test::TestCommands,
 };
 use common::{
     check_general_prerequisites,
@@ -10,9 +11,9 @@ use common::{
 };
 use config::EcosystemConfig;
 use messages::{
-    msg_global_chain_does_not_exist, MSG_PROVER_VERSION_ABOUT, MSG_SUBCOMMAND_CLEAN,
-    MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT,
-    MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT,
+    msg_global_chain_does_not_exist, MSG_CONTRACTS_ABOUT, MSG_PROVER_VERSION_ABOUT,
+    MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT,
+    MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT,
 };
 use xshell::Shell;
 
@@ -20,6 +21,7 @@ use crate::commands::{clean::CleanCommands, fmt::FmtArgs};
 
 mod commands;
 mod dals;
+mod defaults;
 mod messages;
 
 #[derive(Parser, Debug)]
@@ -47,8 +49,10 @@ enum SupervisorSubcommands {
     Fmt(FmtArgs),
     #[command(hide = true)]
     Markdown,
-    #[command(about = MSG_PROVER_VERSION_ABOUT)]
-    ProverVersion,
+    #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)]
+    Prover(ProverCommands),
+    #[command(about = MSG_CONTRACTS_ABOUT)]
+    Contracts(ContractsArgs),
 }
 
 #[derive(Parser, Debug)]
@@ -105,7 +109,8 @@ async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> {
         }
         SupervisorSubcommands::Lint(args) => commands::lint::run(shell, args)?,
         SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?,
-        SupervisorSubcommands::ProverVersion => commands::prover_version::run(shell).await?,
+        SupervisorSubcommands::Prover(command) => commands::prover::run(shell, command).await?,
+        SupervisorSubcommands::Contracts(args) => commands::contracts::run(shell, args)?,
     }
     Ok(())
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs
index de25be281328..311a6e11c326 100644
--- a/zk_toolbox/crates/zk_supervisor/src/messages.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs
@@ -1,4 +1,4 @@
-use crate::commands::lint_utils::Extension;
+use crate::commands::lint_utils::Target;
 
 // Ecosystem related messages
 pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found";
@@ -13,6 +13,7 @@ pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related command
 pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests";
 pub(super) const MSG_SUBCOMMAND_CLEAN: &str = "Clean artifacts";
 pub(super) const MSG_SUBCOMMAND_LINT_ABOUT: &str = "Lint code";
+pub(super) const MSG_CONTRACTS_ABOUT: &str = "Build contracts";
 
 pub(super) const MSG_SUBCOMMAND_FMT_ABOUT: &str = "Format code";
 
@@ -47,6 +48,10 @@ pub(super) const MSG_DATABASE_SETUP_GERUND: &str = "Setting up";
 pub(super) const MSG_DATABASE_SETUP_PAST: &str = "set up";
 pub(super) const MSG_DATABASE_MUST_BE_PRESENTED: &str = "Database config must be presented";
 pub(super) const MSG_DATABASE_COMMON_PROVER_HELP: &str = "Prover database";
+pub(super) const MSG_DATABASE_COMMON_PROVER_URL_HELP: &str =
+    "URL of the Prover database. If not specified, it is used from the current chain's secrets";
+pub(super) const MSG_DATABASE_COMMON_CORE_URL_HELP: &str =
+    "URL of the Core database. If not specified, it is used from the current chain's secrets.";
 pub(super) const MSG_DATABASE_COMMON_CORE_HELP: &str = "Core database";
 pub(super) const MSG_DATABASE_NEW_MIGRATION_DATABASE_HELP: &str =
     "Database to create new migration for";
@@ -84,12 +89,16 @@ pub(super) const MSG_RECOVERY_TEST_ABOUT: &str = "Run recovery tests";
 pub(super) const MSG_UPGRADE_TEST_ABOUT: &str = "Run upgrade tests";
 pub(super) const MSG_RUST_TEST_ABOUT: &str = "Run unit-tests, accepts optional cargo test flags";
 pub(super) const MSG_TEST_RUST_OPTIONS_HELP: &str = "Cargo test flags";
+pub(super) const MSG_BUILD_ABOUT: &str = "Build all test dependencies";
 pub(super) const MSG_TESTS_EXTERNAL_NODE_HELP: &str = "Run tests for external node";
+pub(super) const MSG_NO_DEPS_HELP: &str = "Do not install or build dependencies";
+pub(super) const MSG_TEST_PATTERN_HELP: &str =
+    "Run just the tests matching a pattern. Same as the -t flag on jest.";
+pub(super) const MSG_NO_KILL_HELP: &str = "The test will not kill all the nodes during execution";
 pub(super) const MSG_TESTS_RECOVERY_SNAPSHOT_HELP: &str =
     "Run recovery from a snapshot instead of genesis";
 pub(super) const MSG_UNIT_TESTS_RUN_SUCCESS: &str = "Unit tests ran successfully";
 pub(super) const MSG_USING_CARGO_NEXTEST: &str = "Using cargo-nextest for running tests";
-pub(super) const MSG_CARGO_NEXTEST_MISSING_ERR: &str = "cargo-nextest is missing, please run 'cargo install cargo-nextest'. Falling back to 'cargo test'";
Falling back to 'cargo test'"; pub(super) const MSG_L1_CONTRACTS_ABOUT: &str = "Run L1 contracts tests"; pub(super) const MSG_L1_CONTRACTS_TEST_SUCCESS: &str = "L1 contracts tests ran successfully"; pub(super) const MSG_PROVER_TEST_ABOUT: &str = "Run prover tests"; @@ -97,6 +106,20 @@ pub(super) const MSG_PROVER_TEST_SUCCESS: &str = "Prover tests ran successfully" pub(super) const MSG_POSTGRES_CONFIG_NOT_FOUND_ERR: &str = "Postgres config not found"; pub(super) const MSG_RESETTING_TEST_DATABASES: &str = "Resetting test databases"; +// Contract building related messages +pub(super) const MSG_NOTHING_TO_BUILD_MSG: &str = "Nothing to build!"; +pub(super) const MSG_BUILDING_CONTRACTS: &str = "Building contracts"; +pub(super) const MSG_CONTRACTS_DEPS_SPINNER: &str = "Installing dependencies.."; +pub(super) const MSG_BUILDING_L2_CONTRACTS_SPINNER: &str = "Building L2 contracts.."; +pub(super) const MSG_BUILDING_L1_CONTRACTS_SPINNER: &str = "Building L1 contracts.."; +pub(super) const MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER: &str = "Building system contracts.."; +pub(super) const MSG_BUILDING_TEST_CONTRACTS_SPINNER: &str = "Building test contracts.."; +pub(super) const MSG_BUILDING_CONTRACTS_SUCCESS: &str = "Contracts built successfully"; +pub(super) const MSG_BUILD_L1_CONTRACTS_HELP: &str = "Build L1 contracts"; +pub(super) const MSG_BUILD_L2_CONTRACTS_HELP: &str = "Build L2 contracts"; +pub(super) const MSG_BUILD_SYSTEM_CONTRACTS_HELP: &str = "Build system contracts"; +pub(super) const MSG_BUILD_TEST_CONTRACTS_HELP: &str = "Build test contracts"; + // Integration tests related messages pub(super) fn msg_integration_tests_run(external_node: bool) -> String { let base = "Running integration tests"; @@ -114,8 +137,6 @@ pub(super) const MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS: &str = "Building test // Revert tests related messages pub(super) const MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP: &str = "Enable consensus"; -pub(super) const MSG_REVERT_TEST_INSTALLING_DEPENDENCIES: &str = - "Building and installing dependencies. This process may take a lot of time..."; pub(super) const MSG_REVERT_TEST_RUN_INFO: &str = "Running revert and restart test"; pub(super) fn msg_revert_tests_run(external_node: bool) -> String { @@ -135,8 +156,6 @@ pub(super) const MSG_RECOVERY_TEST_RUN_SUCCESS: &str = "Recovery test ran succes // Upgrade tests related messages pub(super) const MSG_UPGRADE_TEST_RUN_INFO: &str = "Running upgrade test"; -pub(super) const MSG_UPGRADE_TEST_INSTALLING_DEPENDENCIES: &str = - "Building and installing dependencies. 
This process may take a lot of time..."; pub(super) const MSG_UPGRADE_TEST_RUN_SUCCESS: &str = "Upgrade test ran successfully"; // Cleaning related messages @@ -152,30 +171,34 @@ pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str = pub(super) const MSG_RUNNING_SNAPSHOT_CREATOR: &str = "Running snapshot creator"; // Lint related messages -pub(super) fn msg_running_linters_for_files(extensions: &[Extension]) -> String { - let extensions: Vec = extensions.iter().map(|e| format!(".{}", e)).collect(); - format!( - "Running linters for files with extensions: {:?}", - extensions - ) +pub(super) fn msg_running_linters_for_files(targets: &[Target]) -> String { + let targets: Vec = targets.iter().map(|e| format!(".{}", e)).collect(); + format!("Running linters for targets: {:?}", targets) } -pub(super) fn msg_running_linter_for_extension_spinner(extension: &Extension) -> String { - format!("Running linter for files with extension: .{}", extension) +pub(super) fn msg_running_linter_for_extension_spinner(target: &Target) -> String { + format!("Running linter for files with extension: .{}", target) } -pub(super) fn msg_running_fmt_for_extension_spinner(extension: Extension) -> String { - format!("Running prettier for: {extension:?}") +pub(super) fn msg_running_fmt_for_extension_spinner(target: Target) -> String { + format!("Running prettier for: {target:?}") } pub(super) fn msg_running_rustfmt_for_dir_spinner(dir: &str) -> String { format!("Running rustfmt for: {dir:?}") } -pub(super) fn msg_running_fmt_for_extensions_spinner(extensions: &[Extension]) -> String { - format!("Running prettier for: {extensions:?} and rustfmt") +pub(super) fn msg_running_fmt_for_extensions_spinner(targets: &[Target]) -> String { + format!("Running prettier for: {targets:?} and rustfmt") } pub(super) const MSG_LINT_CONFIG_PATH_ERR: &str = "Lint config path error"; pub(super) const MSG_RUNNING_CONTRACTS_LINTER_SPINNER: &str = "Running contracts linter.."; pub(super) const MSG_RUNNING_CONTRACTS_FMT_SPINNER: &str = "Running prettier for contracts.."; + +// Test wallets related messages +pub(super) const MSG_TEST_WALLETS_INFO: &str = "Print test wallets information"; +pub(super) const MSG_DESERIALIZE_TEST_WALLETS_ERR: &str = "Impossible to deserialize test wallets"; +pub(super) const MSG_WALLETS_TEST_SUCCESS: &str = "Wallets test success"; + +pub(super) const MSG_LOADTEST_ABOUT: &str = "Run loadtest";